1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2020 Xillybus Ltd, http://xillybus.com
4  *
5  * Driver for the XillyUSB FPGA/host framework.
6  *
7  * This driver interfaces with a special IP core in an FPGA, setting up
8  * a pipe between a hardware FIFO in the programmable logic and a device
9  * file in the host. The number of such pipes and their attributes are
10  * set up on the logic. This driver detects these automatically and
11  * creates the device files accordingly.
12  */
13
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/list.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <asm/byteorder.h>
20 #include <linux/io.h>
21 #include <linux/interrupt.h>
22 #include <linux/sched.h>
23 #include <linux/fs.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <linux/workqueue.h>
27 #include <linux/crc32.h>
28 #include <linux/poll.h>
29 #include <linux/delay.h>
30 #include <linux/usb.h>
31
32 #include "xillybus_class.h"
33
34 MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
35 MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
36 MODULE_ALIAS("xillyusb");
37 MODULE_LICENSE("GPL v2");
38
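/*
 * Timeouts below are in jiffies: XILLY_RX_TIMEOUT (10 ms) limits how long
 * read() keeps waiting for more data once something has already arrived,
 * and XILLY_RESPONSE_TIMEOUT (500 ms) limits waiting for the FPGA to
 * confirm a command.
 */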
39 #define XILLY_RX_TIMEOUT                (10 * HZ / 1000)
40 #define XILLY_RESPONSE_TIMEOUT          (500 * HZ / 1000)
41
42 #define BUF_SIZE_ORDER                  4
43 #define BUFNUM                          8
44 #define LOG2_IDT_FIFO_SIZE              16
45 #define LOG2_INITIAL_FIFO_BUF_SIZE      16
46
47 #define MSG_EP_NUM                      1
48 #define IN_EP_NUM                       1
49
50 static const char xillyname[] = "xillyusb";
51
52 static unsigned int fifo_buf_order;
53
54 #define USB_VENDOR_ID_XILINX            0x03fd
55 #define USB_VENDOR_ID_ALTERA            0x09fb
56
57 #define USB_PRODUCT_ID_XILLYUSB         0xebbe
58
59 static const struct usb_device_id xillyusb_table[] = {
60         { USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
61         { USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
62         { }
63 };
64
65 MODULE_DEVICE_TABLE(usb, xillyusb_table);
66
67 struct xillyusb_dev;
68
69 struct xillyfifo {
70         unsigned int bufsize; /* In bytes, always a power of 2 */
71         unsigned int bufnum;
72         unsigned int size; /* Lazy: Equals bufsize * bufnum */
73         unsigned int buf_order;
74
75         int fill; /* Number of bytes in the FIFO */
76         spinlock_t lock;
77         wait_queue_head_t waitq;
78
79         unsigned int readpos;
80         unsigned int readbuf;
81         unsigned int writepos;
82         unsigned int writebuf;
83         char **mem;
84 };
85
86 struct xillyusb_channel;
87
88 struct xillyusb_endpoint {
89         struct xillyusb_dev *xdev;
90
91         struct mutex ep_mutex; /* serialize operations on endpoint */
92
93         struct list_head buffers;
94         struct list_head filled_buffers;
95         spinlock_t buffers_lock; /* protect these two lists */
96
97         unsigned int order;
98         unsigned int buffer_size;
99
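        /* Mask that rounds @fifo's fill level down to whole data elements */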
100         unsigned int fill_mask;
101
102         int outstanding_urbs;
103
104         struct usb_anchor anchor;
105
106         struct xillyfifo fifo;
107
108         struct work_struct workitem;
109
110         bool shutting_down;
111         bool drained;
112         bool wake_on_drain;
113
114         u8 ep_num;
115 };
116
117 struct xillyusb_channel {
118         struct xillyusb_dev *xdev;
119
120         struct xillyfifo *in_fifo;
121         struct xillyusb_endpoint *out_ep;
122         struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */
123
124         struct mutex in_mutex; /* serialize fops on FPGA to host stream */
125         struct mutex out_mutex; /* serialize fops on host to FPGA stream */
126         wait_queue_head_t flushq;
127
128         int chan_idx;
129
130         u32 in_consumed_bytes;
131         u32 in_current_checkpoint;
132         u32 out_bytes;
133
134         unsigned int in_log2_element_size;
135         unsigned int out_log2_element_size;
136         unsigned int in_log2_fifo_size;
137         unsigned int out_log2_fifo_size;
138
139         unsigned int read_data_ok; /* EOF not arrived (yet) */
140         unsigned int poll_used;
141         unsigned int flushing;
142         unsigned int flushed;
143         unsigned int canceled;
144
145         /* Bit fields protected by @lock except for initialization */
146         unsigned readable:1;
147         unsigned writable:1;
148         unsigned open_for_read:1;
149         unsigned open_for_write:1;
150         unsigned in_synchronous:1;
151         unsigned out_synchronous:1;
152         unsigned in_seekable:1;
153         unsigned out_seekable:1;
154 };
155
156 struct xillybuffer {
157         struct list_head entry;
158         struct xillyusb_endpoint *ep;
159         void *buf;
160         unsigned int len;
161 };
162
163 struct xillyusb_dev {
164         struct xillyusb_channel *channels;
165
166         struct usb_device       *udev;
167         struct device           *dev; /* For dev_err() and such */
168         struct kref             kref;
169         struct workqueue_struct *workq;
170
171         int error;
172         spinlock_t error_lock; /* protect @error */
173         struct work_struct wakeup_workitem;
174
175         int num_channels;
176
177         struct xillyusb_endpoint *msg_ep;
178         struct xillyusb_endpoint *in_ep;
179
180         struct mutex msg_mutex; /* serialize opcode transmission */
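        /*
         * Used by process_bulk_in(): @in_bytes_left and @leftover_chan_num
         * carry over a data chunk that was split across BULK IN buffers, and
         * @in_counter tracks the rolling counter of non-data opcodes.
         */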
181         int in_bytes_left;
182         int leftover_chan_num;
183         unsigned int in_counter;
184         struct mutex process_in_mutex; /* synchronize wakeup_all() */
185 };
186
187 /* FPGA to host opcodes */
188 enum {
189         OPCODE_DATA = 0,
190         OPCODE_QUIESCE_ACK = 1,
191         OPCODE_EOF = 2,
192         OPCODE_REACHED_CHECKPOINT = 3,
193         OPCODE_CANCELED_CHECKPOINT = 4,
194 };
195
196 /* Host to FPGA opcodes */
197 enum {
198         OPCODE_QUIESCE = 0,
199         OPCODE_REQ_IDT = 1,
200         OPCODE_SET_CHECKPOINT = 2,
201         OPCODE_CLOSE = 3,
202         OPCODE_SET_PUSH = 4,
203         OPCODE_UPDATE_PUSH = 5,
204         OPCODE_CANCEL_CHECKPOINT = 6,
205         OPCODE_SET_ADDR = 7,
206 };
207
208 /*
209  * fifo_write() and fifo_read() are NOT reentrant (i.e. multiple concurrent
210  * calls to either function on the same FIFO are not allowed). However, it's
211  * OK for two threads to call fifo_write() and fifo_read() on the same FIFO
212  * at the same time, one function each.
213  */
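/*
 * In this driver each FIFO indeed has a single reader and a single writer:
 * an OUT endpoint's FIFO is filled by xillyusb_write() (or by
 * xillyusb_send_opcode() for the message endpoint) and drained by
 * try_queue_bulk_out(); a channel's IN FIFO is filled by process_bulk_in()
 * and drained by xillyusb_read().
 */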
214
215 static int fifo_write(struct xillyfifo *fifo,
216                       const void *data, unsigned int len,
217                       int (*copier)(void *, const void *, int))
218 {
219         unsigned int done = 0;
220         unsigned int todo = len;
221         unsigned int nmax;
222         unsigned int writepos = fifo->writepos;
223         unsigned int writebuf = fifo->writebuf;
224         unsigned long flags;
225         int rc;
226
227         nmax = fifo->size - READ_ONCE(fifo->fill);
228
229         while (1) {
230                 unsigned int nrail = fifo->bufsize - writepos;
231                 unsigned int n = min(todo, nmax);
232
233                 if (n == 0) {
234                         spin_lock_irqsave(&fifo->lock, flags);
235                         fifo->fill += done;
236                         spin_unlock_irqrestore(&fifo->lock, flags);
237
238                         fifo->writepos = writepos;
239                         fifo->writebuf = writebuf;
240
241                         return done;
242                 }
243
244                 if (n > nrail)
245                         n = nrail;
246
247                 rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);
248
249                 if (rc)
250                         return rc;
251
252                 done += n;
253                 todo -= n;
254
255                 writepos += n;
256                 nmax -= n;
257
258                 if (writepos == fifo->bufsize) {
259                         writepos = 0;
260                         writebuf++;
261
262                         if (writebuf == fifo->bufnum)
263                                 writebuf = 0;
264                 }
265         }
266 }
267
268 static int fifo_read(struct xillyfifo *fifo,
269                      void *data, unsigned int len,
270                      int (*copier)(void *, const void *, int))
271 {
272         unsigned int done = 0;
273         unsigned int todo = len;
274         unsigned int fill;
275         unsigned int readpos = fifo->readpos;
276         unsigned int readbuf = fifo->readbuf;
277         unsigned long flags;
278         int rc;
279
280         /*
281          * The spinlock here is necessary: without it, fifo->fill could
282          * have been increased by fifo_write() after data was written to
283          * the buffer, but that data might not yet be visible to this
284          * thread at the moment the updated fifo->fill is. That could lead
285          * to reading invalid data.
286          */
287
288         spin_lock_irqsave(&fifo->lock, flags);
289         fill = fifo->fill;
290         spin_unlock_irqrestore(&fifo->lock, flags);
291
292         while (1) {
293                 unsigned int nrail = fifo->bufsize - readpos;
294                 unsigned int n = min(todo, fill);
295
296                 if (n == 0) {
297                         spin_lock_irqsave(&fifo->lock, flags);
298                         fifo->fill -= done;
299                         spin_unlock_irqrestore(&fifo->lock, flags);
300
301                         fifo->readpos = readpos;
302                         fifo->readbuf = readbuf;
303
304                         return done;
305                 }
306
307                 if (n > nrail)
308                         n = nrail;
309
310                 rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);
311
312                 if (rc)
313                         return rc;
314
315                 done += n;
316                 todo -= n;
317
318                 readpos += n;
319                 fill -= n;
320
321                 if (readpos == fifo->bufsize) {
322                         readpos = 0;
323                         readbuf++;
324
325                         if (readbuf == fifo->bufnum)
326                                 readbuf = 0;
327                 }
328         }
329 }
330
331 /*
332  * These three wrapper functions are used as the @copier argument to
333  * fifo_write() and fifo_read(), so that they can work directly with
334  * user memory as well.
335  */
336
337 static int xilly_copy_from_user(void *dst, const void *src, int n)
338 {
339         if (copy_from_user(dst, (const void __user *)src, n))
340                 return -EFAULT;
341
342         return 0;
343 }
344
345 static int xilly_copy_to_user(void *dst, const void *src, int n)
346 {
347         if (copy_to_user((void __user *)dst, src, n))
348                 return -EFAULT;
349
350         return 0;
351 }
352
353 static int xilly_memcpy(void *dst, const void *src, int n)
354 {
355         memcpy(dst, src, n);
356
357         return 0;
358 }
359
360 static int fifo_init(struct xillyfifo *fifo,
361                      unsigned int log2_size)
362 {
363         unsigned int log2_bufnum;
364         unsigned int buf_order;
365         int i;
366
367         unsigned int log2_fifo_buf_size;
368
369 retry:
370         log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;
371
372         if (log2_size > log2_fifo_buf_size) {
373                 log2_bufnum = log2_size - log2_fifo_buf_size;
374                 buf_order = fifo_buf_order;
375                 fifo->bufsize = 1 << log2_fifo_buf_size;
376         } else {
377                 log2_bufnum = 0;
378                 buf_order = (log2_size > PAGE_SHIFT) ?
379                         log2_size - PAGE_SHIFT : 0;
380                 fifo->bufsize = 1 << log2_size;
381         }
382
383         fifo->bufnum = 1 << log2_bufnum;
384         fifo->size = fifo->bufnum * fifo->bufsize;
385         fifo->buf_order = buf_order;
386
387         fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);
388
389         if (!fifo->mem)
390                 return -ENOMEM;
391
392         for (i = 0; i < fifo->bufnum; i++) {
393                 fifo->mem[i] = (void *)
394                         __get_free_pages(GFP_KERNEL, buf_order);
395
396                 if (!fifo->mem[i])
397                         goto memfail;
398         }
399
400         fifo->fill = 0;
401         fifo->readpos = 0;
402         fifo->readbuf = 0;
403         fifo->writepos = 0;
404         fifo->writebuf = 0;
405         spin_lock_init(&fifo->lock);
406         init_waitqueue_head(&fifo->waitq);
407         return 0;
408
409 memfail:
410         for (i--; i >= 0; i--)
411                 free_pages((unsigned long)fifo->mem[i], buf_order);
412
413         kfree(fifo->mem);
414         fifo->mem = NULL;
415
416         if (fifo_buf_order) {
417                 fifo_buf_order--;
418                 goto retry;
419         } else {
420                 return -ENOMEM;
421         }
422 }
423
424 static void fifo_mem_release(struct xillyfifo *fifo)
425 {
426         int i;
427
428         if (!fifo->mem)
429                 return;
430
431         for (i = 0; i < fifo->bufnum; i++)
432                 free_pages((unsigned long)fifo->mem[i], fifo->buf_order);
433
434         kfree(fifo->mem);
435 }
436
437 /*
438  * When endpoint_quiesce() returns, the endpoint has no URBs submitted,
439  * won't accept any new URB submissions, and its related work item is not
440  * running and won't run again.
441  */
442
443 static void endpoint_quiesce(struct xillyusb_endpoint *ep)
444 {
445         mutex_lock(&ep->ep_mutex);
446         ep->shutting_down = true;
447         mutex_unlock(&ep->ep_mutex);
448
449         usb_kill_anchored_urbs(&ep->anchor);
450         cancel_work_sync(&ep->workitem);
451 }
452
453 /*
454  * Note that endpoint_dealloc() also frees fifo memory (if allocated), even
455  * though endpoint_alloc doesn't allocate that memory.
456  */
457
458 static void endpoint_dealloc(struct xillyusb_endpoint *ep)
459 {
460         struct list_head *this, *next;
461
462         fifo_mem_release(&ep->fifo);
463
464         /* Join @filled_buffers with @buffers to free these entries too */
465         list_splice(&ep->filled_buffers, &ep->buffers);
466
467         list_for_each_safe(this, next, &ep->buffers) {
468                 struct xillybuffer *xb =
469                         list_entry(this, struct xillybuffer, entry);
470
471                 free_pages((unsigned long)xb->buf, ep->order);
472                 kfree(xb);
473         }
474
475         kfree(ep);
476 }
477
478 static struct xillyusb_endpoint
479 *endpoint_alloc(struct xillyusb_dev *xdev,
480                 u8 ep_num,
481                 void (*work)(struct work_struct *),
482                 unsigned int order,
483                 int bufnum)
484 {
485         int i;
486
487         struct xillyusb_endpoint *ep;
488
489         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
490
491         if (!ep)
492                 return NULL;
493
494         INIT_LIST_HEAD(&ep->buffers);
495         INIT_LIST_HEAD(&ep->filled_buffers);
496
497         spin_lock_init(&ep->buffers_lock);
498         mutex_init(&ep->ep_mutex);
499
500         init_usb_anchor(&ep->anchor);
501         INIT_WORK(&ep->workitem, work);
502
503         ep->order = order;
504         ep->buffer_size =  1 << (PAGE_SHIFT + order);
505         ep->outstanding_urbs = 0;
506         ep->drained = true;
507         ep->wake_on_drain = false;
508         ep->xdev = xdev;
509         ep->ep_num = ep_num;
510         ep->shutting_down = false;
511
512         for (i = 0; i < bufnum; i++) {
513                 struct xillybuffer *xb;
514                 unsigned long addr;
515
516                 xb = kzalloc(sizeof(*xb), GFP_KERNEL);
517
518                 if (!xb) {
519                         endpoint_dealloc(ep);
520                         return NULL;
521                 }
522
523                 addr = __get_free_pages(GFP_KERNEL, order);
524
525                 if (!addr) {
526                         kfree(xb);
527                         endpoint_dealloc(ep);
528                         return NULL;
529                 }
530
531                 xb->buf = (void *)addr;
532                 xb->ep = ep;
533                 list_add_tail(&xb->entry, &ep->buffers);
534         }
535         return ep;
536 }
537
538 static void cleanup_dev(struct kref *kref)
539 {
540         struct xillyusb_dev *xdev =
541                 container_of(kref, struct xillyusb_dev, kref);
542
543         if (xdev->in_ep)
544                 endpoint_dealloc(xdev->in_ep);
545
546         if (xdev->msg_ep)
547                 endpoint_dealloc(xdev->msg_ep);
548
549         if (xdev->workq)
550                 destroy_workqueue(xdev->workq);
551
552         usb_put_dev(xdev->udev);
553         kfree(xdev->channels); /* Argument may be NULL, and that's fine */
554         kfree(xdev);
555 }
556
557 /*
558  * @process_in_mutex is taken to ensure that bulk_in_work() won't call
559  * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
560  * @read_data_ok entries, which will make process_bulk_in() report false
561  * errors if executed. The mechanism relies on the fact that xdev->error is
562  * assigned a non-zero value by report_io_error() prior to queueing
563  * wakeup_all(), which prevents bulk_in_work() from calling process_bulk_in().
564  *
565  * The fact that wakeup_all() and bulk_in_work() are queued on the same
566  * workqueue makes their concurrent execution very unlikely; however, the
567  * kernel's API doesn't seem to guarantee this strictly.
568  */
569
570 static void wakeup_all(struct work_struct *work)
571 {
572         int i;
573         struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
574                                                  wakeup_workitem);
575
576         mutex_lock(&xdev->process_in_mutex);
577
578         for (i = 0; i < xdev->num_channels; i++) {
579                 struct xillyusb_channel *chan = &xdev->channels[i];
580
581                 mutex_lock(&chan->lock);
582
583                 if (chan->in_fifo) {
584                         /*
585                          * Fake an EOF: Even if such arrives, it won't be
586                          * processed.
587                          */
588                         chan->read_data_ok = 0;
589                         wake_up_interruptible(&chan->in_fifo->waitq);
590                 }
591
592                 if (chan->out_ep)
593                         wake_up_interruptible(&chan->out_ep->fifo.waitq);
594
595                 mutex_unlock(&chan->lock);
596
597                 wake_up_interruptible(&chan->flushq);
598         }
599
600         mutex_unlock(&xdev->process_in_mutex);
601
602         wake_up_interruptible(&xdev->msg_ep->fifo.waitq);
603
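        /* Drop the reference taken by report_io_error() when queueing this work */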
604         kref_put(&xdev->kref, cleanup_dev);
605 }
606
607 static void report_io_error(struct xillyusb_dev *xdev,
608                             int errcode)
609 {
610         unsigned long flags;
611         bool do_once = false;
612
613         spin_lock_irqsave(&xdev->error_lock, flags);
614         if (!xdev->error) {
615                 xdev->error = errcode;
616                 do_once = true;
617         }
618         spin_unlock_irqrestore(&xdev->error_lock, flags);
619
620         if (do_once) {
621                 kref_get(&xdev->kref); /* xdev is used by work item */
622                 queue_work(xdev->workq, &xdev->wakeup_workitem);
623         }
624 }
625
626 /*
627  * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
628  * the previous pointer is never used after its return.
629  */
630
631 static void safely_assign_in_fifo(struct xillyusb_channel *chan,
632                                   struct xillyfifo *fifo)
633 {
634         mutex_lock(&chan->lock);
635         chan->in_fifo = fifo;
636         mutex_unlock(&chan->lock);
637
638         flush_work(&chan->xdev->in_ep->workitem);
639 }
640
641 static void bulk_in_completer(struct urb *urb)
642 {
643         struct xillybuffer *xb = urb->context;
644         struct xillyusb_endpoint *ep = xb->ep;
645         unsigned long flags;
646
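        /*
         * A status of -ENOENT, -ECONNRESET or -ESHUTDOWN means that the URB
         * was canceled (e.g. by usb_kill_anchored_urbs()) or the device went
         * away, so it's not reported as an I/O error.
         */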
647         if (urb->status) {
648                 if (!(urb->status == -ENOENT ||
649                       urb->status == -ECONNRESET ||
650                       urb->status == -ESHUTDOWN))
651                         report_io_error(ep->xdev, -EIO);
652
653                 spin_lock_irqsave(&ep->buffers_lock, flags);
654                 list_add_tail(&xb->entry, &ep->buffers);
655                 ep->outstanding_urbs--;
656                 spin_unlock_irqrestore(&ep->buffers_lock, flags);
657
658                 return;
659         }
660
661         xb->len = urb->actual_length;
662
663         spin_lock_irqsave(&ep->buffers_lock, flags);
664         list_add_tail(&xb->entry, &ep->filled_buffers);
665         spin_unlock_irqrestore(&ep->buffers_lock, flags);
666
667         if (!ep->shutting_down)
668                 queue_work(ep->xdev->workq, &ep->workitem);
669 }
670
671 static void bulk_out_completer(struct urb *urb)
672 {
673         struct xillybuffer *xb = urb->context;
674         struct xillyusb_endpoint *ep = xb->ep;
675         unsigned long flags;
676
677         if (urb->status &&
678             (!(urb->status == -ENOENT ||
679                urb->status == -ECONNRESET ||
680                urb->status == -ESHUTDOWN)))
681                 report_io_error(ep->xdev, -EIO);
682
683         spin_lock_irqsave(&ep->buffers_lock, flags);
684         list_add_tail(&xb->entry, &ep->buffers);
685         ep->outstanding_urbs--;
686         spin_unlock_irqrestore(&ep->buffers_lock, flags);
687
688         if (!ep->shutting_down)
689                 queue_work(ep->xdev->workq, &ep->workitem);
690 }
691
692 static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
693 {
694         struct xillyusb_dev *xdev = ep->xdev;
695         struct xillybuffer *xb;
696         struct urb *urb;
697
698         int rc;
699         unsigned long flags;
700         unsigned int bufsize = ep->buffer_size;
701
702         mutex_lock(&ep->ep_mutex);
703
704         if (ep->shutting_down || xdev->error)
705                 goto done;
706
707         while (1) {
708                 spin_lock_irqsave(&ep->buffers_lock, flags);
709
710                 if (list_empty(&ep->buffers)) {
711                         spin_unlock_irqrestore(&ep->buffers_lock, flags);
712                         goto done;
713                 }
714
715                 xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
716                 list_del(&xb->entry);
717                 ep->outstanding_urbs++;
718
719                 spin_unlock_irqrestore(&ep->buffers_lock, flags);
720
721                 urb = usb_alloc_urb(0, GFP_KERNEL);
722                 if (!urb) {
723                         report_io_error(xdev, -ENOMEM);
724                         goto relist;
725                 }
726
727                 usb_fill_bulk_urb(urb, xdev->udev,
728                                   usb_rcvbulkpipe(xdev->udev, ep->ep_num),
729                                   xb->buf, bufsize, bulk_in_completer, xb);
730
731                 usb_anchor_urb(urb, &ep->anchor);
732
733                 rc = usb_submit_urb(urb, GFP_KERNEL);
734
735                 if (rc) {
736                         report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
737                                         -EIO);
738                         goto unanchor;
739                 }
740
741                 usb_free_urb(urb); /* This just decrements reference count */
742         }
743
744 unanchor:
745         usb_unanchor_urb(urb);
746         usb_free_urb(urb);
747
748 relist:
749         spin_lock_irqsave(&ep->buffers_lock, flags);
750         list_add_tail(&xb->entry, &ep->buffers);
751         ep->outstanding_urbs--;
752         spin_unlock_irqrestore(&ep->buffers_lock, flags);
753
754 done:
755         mutex_unlock(&ep->ep_mutex);
756 }
757
758 static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
759 {
760         struct xillyfifo *fifo = &ep->fifo;
761         struct xillyusb_dev *xdev = ep->xdev;
762         struct xillybuffer *xb;
763         struct urb *urb;
764
765         int rc;
766         unsigned int fill;
767         unsigned long flags;
768         bool do_wake = false;
769
770         mutex_lock(&ep->ep_mutex);
771
772         if (ep->shutting_down || xdev->error)
773                 goto done;
774
775         fill = READ_ONCE(fifo->fill) & ep->fill_mask;
776
777         while (1) {
778                 int count;
779                 unsigned int max_read;
780
781                 spin_lock_irqsave(&ep->buffers_lock, flags);
782
783                 /*
784                  * Race conditions might have the FIFO filled while the
785                  * endpoint is marked as drained here. That doesn't matter,
786                  * because the sole purpose of @drained is to ensure that
787                  * certain data has been sent on the USB channel before
788                  * shutting it down. Hence knowing that the FIFO appears
789                  * to be empty with no outstanding URBs at some moment
790                  * is good enough.
791                  */
792
793                 if (!fill) {
794                         ep->drained = !ep->outstanding_urbs;
795                         if (ep->drained && ep->wake_on_drain)
796                                 do_wake = true;
797
798                         spin_unlock_irqrestore(&ep->buffers_lock, flags);
799                         goto done;
800                 }
801
802                 ep->drained = false;
803
804                 if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
805                     list_empty(&ep->buffers)) {
806                         spin_unlock_irqrestore(&ep->buffers_lock, flags);
807                         goto done;
808                 }
809
810                 xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
811                 list_del(&xb->entry);
812                 ep->outstanding_urbs++;
813
814                 spin_unlock_irqrestore(&ep->buffers_lock, flags);
815
816                 max_read = min(fill, ep->buffer_size);
817
818                 count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);
819
820                 /*
821                  * xilly_memcpy always returns 0 => fifo_read can't fail =>
822                  * count > 0
823                  */
824
825                 urb = usb_alloc_urb(0, GFP_KERNEL);
826                 if (!urb) {
827                         report_io_error(xdev, -ENOMEM);
828                         goto relist;
829                 }
830
831                 usb_fill_bulk_urb(urb, xdev->udev,
832                                   usb_sndbulkpipe(xdev->udev, ep->ep_num),
833                                   xb->buf, count, bulk_out_completer, xb);
834
835                 usb_anchor_urb(urb, &ep->anchor);
836
837                 rc = usb_submit_urb(urb, GFP_KERNEL);
838
839                 if (rc) {
840                         report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
841                                         -EIO);
842                         goto unanchor;
843                 }
844
845                 usb_free_urb(urb); /* This just decrements reference count */
846
847                 fill -= count;
848                 do_wake = true;
849         }
850
851 unanchor:
852         usb_unanchor_urb(urb);
853         usb_free_urb(urb);
854
855 relist:
856         spin_lock_irqsave(&ep->buffers_lock, flags);
857         list_add_tail(&xb->entry, &ep->buffers);
858         ep->outstanding_urbs--;
859         spin_unlock_irqrestore(&ep->buffers_lock, flags);
860
861 done:
862         mutex_unlock(&ep->ep_mutex);
863
864         if (do_wake)
865                 wake_up_interruptible(&fifo->waitq);
866 }
867
868 static void bulk_out_work(struct work_struct *work)
869 {
870         struct xillyusb_endpoint *ep = container_of(work,
871                                                     struct xillyusb_endpoint,
872                                                     workitem);
873         try_queue_bulk_out(ep);
874 }
875
876 static int process_in_opcode(struct xillyusb_dev *xdev,
877                              int opcode,
878                              int chan_num)
879 {
880         struct xillyusb_channel *chan;
881         struct device *dev = xdev->dev;
882         int chan_idx = chan_num >> 1;
883
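        /*
         * Bit 0 of the channel ID on the wire distinguishes the direction
         * (1 for the FPGA-to-host stream), hence the shift by one above.
         */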
884         if (chan_idx >= xdev->num_channels) {
885                 dev_err(dev, "Received illegal channel ID %d from FPGA\n",
886                         chan_num);
887                 return -EIO;
888         }
889
890         chan = &xdev->channels[chan_idx];
891
892         switch (opcode) {
893         case OPCODE_EOF:
894                 if (!chan->read_data_ok) {
895                         dev_err(dev, "Received unexpected EOF for channel %d\n",
896                                 chan_num);
897                         return -EIO;
898                 }
899
900                 /*
901                  * A write memory barrier ensures that the FIFO's fill level
902                  * is visible before read_data_ok turns zero, so the data in
903                  * the FIFO isn't missed by the consumer.
904                  */
905                 smp_wmb();
906                 WRITE_ONCE(chan->read_data_ok, 0);
907                 wake_up_interruptible(&chan->in_fifo->waitq);
908                 break;
909
910         case OPCODE_REACHED_CHECKPOINT:
911                 chan->flushing = 0;
912                 wake_up_interruptible(&chan->flushq);
913                 break;
914
915         case OPCODE_CANCELED_CHECKPOINT:
916                 chan->canceled = 1;
917                 wake_up_interruptible(&chan->flushq);
918                 break;
919
920         default:
921                 dev_err(dev, "Received illegal opcode %d from FPGA\n",
922                         opcode);
923                 return -EIO;
924         }
925
926         return 0;
927 }
928
929 static int process_bulk_in(struct xillybuffer *xb)
930 {
931         struct xillyusb_endpoint *ep = xb->ep;
932         struct xillyusb_dev *xdev = ep->xdev;
933         struct device *dev = xdev->dev;
934         int dws = xb->len >> 2;
935         __le32 *p = xb->buf;
936         u32 ctrlword;
937         struct xillyusb_channel *chan;
938         struct xillyfifo *fifo;
939         int chan_num = 0, opcode;
940         int chan_idx;
941         int bytes, count, dwconsume;
942         int in_bytes_left = 0;
943         int rc;
944
945         if ((dws << 2) != xb->len) {
946                 dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
947                         xb->len);
948                 return -EIO;
949         }
950
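        /*
         * A data chunk may be split across BULK IN transfers: if the previous
         * buffer ended in the middle of one, resume writing the remaining
         * bytes to the same channel's FIFO.
         */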
951         if (xdev->in_bytes_left) {
952                 bytes = min(xdev->in_bytes_left, dws << 2);
953                 in_bytes_left = xdev->in_bytes_left - bytes;
954                 chan_num = xdev->leftover_chan_num;
955                 goto resume_leftovers;
956         }
957
958         while (dws) {
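                /*
                 * Control word layout: bits 11:0 are the channel ID, bits
                 * 21:12 are a byte count minus one (for OPCODE_DATA) or a
                 * rolling counter (for other opcodes), and bits 27:24 are
                 * the opcode.
                 */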
959                 ctrlword = le32_to_cpu(*p++);
960                 dws--;
961
962                 chan_num = ctrlword & 0xfff;
963                 count = (ctrlword >> 12) & 0x3ff;
964                 opcode = (ctrlword >> 24) & 0xf;
965
966                 if (opcode != OPCODE_DATA) {
967                         unsigned int in_counter = xdev->in_counter++ & 0x3ff;
968
969                         if (count != in_counter) {
970                                 dev_err(dev, "Expected opcode counter %d, got %d\n",
971                                         in_counter, count);
972                                 return -EIO;
973                         }
974
975                         rc = process_in_opcode(xdev, opcode, chan_num);
976
977                         if (rc)
978                                 return rc;
979
980                         continue;
981                 }
982
983                 bytes = min(count + 1, dws << 2);
984                 in_bytes_left = count + 1 - bytes;
985
986 resume_leftovers:
987                 chan_idx = chan_num >> 1;
988
989                 if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
990                     !xdev->channels[chan_idx].read_data_ok) {
991                         dev_err(dev, "Received illegal channel ID %d from FPGA\n",
992                                 chan_num);
993                         return -EIO;
994                 }
995                 chan = &xdev->channels[chan_idx];
996
997                 fifo = chan->in_fifo;
998
999                 if (unlikely(!fifo))
1000                         return -EIO; /* We got really unexpected data */
1001
1002                 if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
1003                         dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
1004                         return -EIO;
1005                 }
1006
1007                 wake_up_interruptible(&fifo->waitq);
1008
1009                 dwconsume = (bytes + 3) >> 2;
1010                 dws -= dwconsume;
1011                 p += dwconsume;
1012         }
1013
1014         xdev->in_bytes_left = in_bytes_left;
1015         xdev->leftover_chan_num = chan_num;
1016         return 0;
1017 }
1018
1019 static void bulk_in_work(struct work_struct *work)
1020 {
1021         struct xillyusb_endpoint *ep =
1022                 container_of(work, struct xillyusb_endpoint, workitem);
1023         struct xillyusb_dev *xdev = ep->xdev;
1024         unsigned long flags;
1025         struct xillybuffer *xb;
1026         bool consumed = false;
1027         int rc = 0;
1028
1029         mutex_lock(&xdev->process_in_mutex);
1030
1031         spin_lock_irqsave(&ep->buffers_lock, flags);
1032
1033         while (1) {
1034                 if (rc || list_empty(&ep->filled_buffers)) {
1035                         spin_unlock_irqrestore(&ep->buffers_lock, flags);
1036                         mutex_unlock(&xdev->process_in_mutex);
1037
1038                         if (rc)
1039                                 report_io_error(xdev, rc);
1040                         else if (consumed)
1041                                 try_queue_bulk_in(ep);
1042
1043                         return;
1044                 }
1045
1046                 xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
1047                                       entry);
1048                 list_del(&xb->entry);
1049
1050                 spin_unlock_irqrestore(&ep->buffers_lock, flags);
1051
1052                 consumed = true;
1053
1054                 if (!xdev->error)
1055                         rc = process_bulk_in(xb);
1056
1057                 spin_lock_irqsave(&ep->buffers_lock, flags);
1058                 list_add_tail(&xb->entry, &ep->buffers);
1059                 ep->outstanding_urbs--;
1060         }
1061 }
1062
1063 static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
1064                                 int chan_num, char opcode, u32 data)
1065 {
1066         struct xillyusb_endpoint *ep = xdev->msg_ep;
1067         struct xillyfifo *fifo = &ep->fifo;
1068         __le32 msg[2];
1069
1070         int rc = 0;
1071
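        /*
         * Each host-to-FPGA message is 8 bytes: a control word carrying the
         * channel ID in bits 11:0 and the opcode in bits 27:24, followed by
         * a 32-bit data word.
         */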
1072         msg[0] = cpu_to_le32((chan_num & 0xfff) |
1073                              ((opcode & 0xf) << 24));
1074         msg[1] = cpu_to_le32(data);
1075
1076         mutex_lock(&xdev->msg_mutex);
1077
1078         /*
1079          * The wait queue is woken with the interruptible variant, so the
1080          * wait function matches. However, returning because of an interrupt
1081          * would mess things up considerably, in particular when the caller is
1082          * the release method. The xdev->error check prevents being stuck
1083          * forever in the event of a bizarre hardware bug: Pull the USB plug.
1084          */
1085
1086         while (wait_event_interruptible(fifo->waitq,
1087                                         fifo->fill <= (fifo->size - 8) ||
1088                                         xdev->error))
1089                 ; /* Empty loop */
1090
1091         if (xdev->error) {
1092                 rc = xdev->error;
1093                 goto unlock_done;
1094         }
1095
1096         fifo_write(fifo, (void *)msg, 8, xilly_memcpy);
1097
1098         try_queue_bulk_out(ep);
1099
1100 unlock_done:
1101         mutex_unlock(&xdev->msg_mutex);
1102
1103         return rc;
1104 }
1105
1106 /*
1107  * Note that flush_downstream() merely waits for the data to arrive at
1108  * the application logic in the FPGA -- unlike its PCIe Xillybus counterpart,
1109  * it does nothing to make that happen (nor is that necessary).
1110  *
1111  * This function is not reentrant for the same @chan, but this is covered
1112  * by the fact that for any given @chan, it's called only by the open,
1113  * write, llseek and flush fops methods, which can't run in parallel (and the
1114  * write + flush and llseek method handlers are protected with out_mutex).
1115  *
1116  * chan->flushed is there to avoid multiple flushes at the same position,
1117  * in particular as a result of programs that close the file descriptor
1118  * e.g. after a dup2() for redirection.
1119  */
1120
1121 static int flush_downstream(struct xillyusb_channel *chan,
1122                             long timeout,
1123                             bool interruptible)
1124 {
1125         struct xillyusb_dev *xdev = chan->xdev;
1126         int chan_num = chan->chan_idx << 1;
1127         long deadline, left_to_sleep;
1128         int rc;
1129
1130         if (chan->flushed)
1131                 return 0;
1132
1133         deadline = jiffies + 1 + timeout;
1134
1135         if (chan->flushing) {
1136                 long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;
1137
1138                 chan->canceled = 0;
1139                 rc = xillyusb_send_opcode(xdev, chan_num,
1140                                           OPCODE_CANCEL_CHECKPOINT, 0);
1141
1142                 if (rc)
1143                         return rc; /* Only real error, never -EINTR */
1144
1145                 /* Ignoring interrupts. Cancellation must be handled */
1146                 while (!chan->canceled) {
1147                         left_to_sleep = cancel_deadline - ((long)jiffies);
1148
1149                         if (left_to_sleep <= 0) {
1150                                 report_io_error(xdev, -EIO);
1151                                 return -EIO;
1152                         }
1153
1154                         rc = wait_event_interruptible_timeout(chan->flushq,
1155                                                               chan->canceled ||
1156                                                               xdev->error,
1157                                                               left_to_sleep);
1158
1159                         if (xdev->error)
1160                                 return xdev->error;
1161                 }
1162         }
1163
1164         chan->flushing = 1;
1165
1166         /*
1167          * The checkpoint is given in terms of data elements, not bytes. As
1168          * a result, if less than an element's worth of data is stored in the
1169          * FIFO, it's not flushed, including the flush before closing, which
1170          * means that such data is lost. This is consistent with PCIe Xillybus.
1171          */
1172
1173         rc = xillyusb_send_opcode(xdev, chan_num,
1174                                   OPCODE_SET_CHECKPOINT,
1175                                   chan->out_bytes >>
1176                                   chan->out_log2_element_size);
1177
1178         if (rc)
1179                 return rc; /* Only real error, never -EINTR */
1180
1181         if (!timeout) {
1182                 while (chan->flushing) {
1183                         rc = wait_event_interruptible(chan->flushq,
1184                                                       !chan->flushing ||
1185                                                       xdev->error);
1186                         if (xdev->error)
1187                                 return xdev->error;
1188
1189                         if (interruptible && rc)
1190                                 return -EINTR;
1191                 }
1192
1193                 goto done;
1194         }
1195
1196         while (chan->flushing) {
1197                 left_to_sleep = deadline - ((long)jiffies);
1198
1199                 if (left_to_sleep <= 0)
1200                         return -ETIMEDOUT;
1201
1202                 rc = wait_event_interruptible_timeout(chan->flushq,
1203                                                       !chan->flushing ||
1204                                                       xdev->error,
1205                                                       left_to_sleep);
1206
1207                 if (xdev->error)
1208                         return xdev->error;
1209
1210                 if (interruptible && rc < 0)
1211                         return -EINTR;
1212         }
1213
1214 done:
1215         chan->flushed = 1;
1216         return 0;
1217 }
1218
1219 /* request_read_anything(): Ask the FPGA for any little amount of data */
1220 static int request_read_anything(struct xillyusb_channel *chan,
1221                                  char opcode)
1222 {
1223         struct xillyusb_dev *xdev = chan->xdev;
1224         unsigned int sh = chan->in_log2_element_size;
1225         int chan_num = (chan->chan_idx << 1) | 1;
1226         u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
1227
1228         return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
1229 }
1230
1231 static int xillyusb_open(struct inode *inode, struct file *filp)
1232 {
1233         struct xillyusb_dev *xdev;
1234         struct xillyusb_channel *chan;
1235         struct xillyfifo *in_fifo = NULL;
1236         struct xillyusb_endpoint *out_ep = NULL;
1237         int rc;
1238         int index;
1239
1240         rc = xillybus_find_inode(inode, (void **)&xdev, &index);
1241         if (rc)
1242                 return rc;
1243
1244         chan = &xdev->channels[index];
1245         filp->private_data = chan;
1246
1247         mutex_lock(&chan->lock);
1248
1249         rc = -ENODEV;
1250
1251         if (xdev->error)
1252                 goto unmutex_fail;
1253
1254         if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
1255             ((filp->f_mode & FMODE_WRITE) && !chan->writable))
1256                 goto unmutex_fail;
1257
1258         if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
1259             chan->in_synchronous) {
1260                 dev_err(xdev->dev,
1261                         "open() failed: O_NONBLOCK not allowed for read on this device\n");
1262                 goto unmutex_fail;
1263         }
1264
1265         if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
1266             chan->out_synchronous) {
1267                 dev_err(xdev->dev,
1268                         "open() failed: O_NONBLOCK not allowed for write on this device\n");
1269                 goto unmutex_fail;
1270         }
1271
1272         rc = -EBUSY;
1273
1274         if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
1275             ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
1276                 goto unmutex_fail;
1277
1278         kref_get(&xdev->kref);
1279
1280         if (filp->f_mode & FMODE_READ)
1281                 chan->open_for_read = 1;
1282
1283         if (filp->f_mode & FMODE_WRITE)
1284                 chan->open_for_write = 1;
1285
1286         mutex_unlock(&chan->lock);
1287
1288         if (filp->f_mode & FMODE_WRITE) {
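                /*
                 * Bulk OUT endpoints of user channels start at address 2;
                 * endpoint 1 (MSG_EP_NUM) carries the message stream.
                 */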
1289                 out_ep = endpoint_alloc(xdev,
1290                                         (chan->chan_idx + 2) | USB_DIR_OUT,
1291                                         bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
1292
1293                 if (!out_ep) {
1294                         rc = -ENOMEM;
1295                         goto unopen;
1296                 }
1297
1298                 rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
1299
1300                 if (rc)
1301                         goto late_unopen;
1302
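                /*
                 * -(1 << n) clears the n lowest bits, so try_queue_bulk_out()
                 * sees the FIFO's fill level rounded down to a whole number
                 * of elements.
                 */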
1303                 out_ep->fill_mask = -(1 << chan->out_log2_element_size);
1304                 chan->out_bytes = 0;
1305                 chan->flushed = 0;
1306
1307                 /*
1308                  * Sending a flush request to a previously closed stream
1309                  * effectively opens it, and also waits until the command is
1310                  * confirmed by the FPGA. The latter is necessary because the
1311                  * data is sent through a separate BULK OUT endpoint, and the
1312                  * xHCI controller is free to reorder transmissions.
1313                  *
1314                  * This can't go wrong unless there's a serious hardware error
1315                  * (or the computer is stuck for 500 ms?)
1316                  */
1317                 rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
1318
1319                 if (rc == -ETIMEDOUT) {
1320                         rc = -EIO;
1321                         report_io_error(xdev, rc);
1322                 }
1323
1324                 if (rc)
1325                         goto late_unopen;
1326         }
1327
1328         if (filp->f_mode & FMODE_READ) {
1329                 in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
1330
1331                 if (!in_fifo) {
1332                         rc = -ENOMEM;
1333                         goto late_unopen;
1334                 }
1335
1336                 rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
1337
1338                 if (rc) {
1339                         kfree(in_fifo);
1340                         goto late_unopen;
1341                 }
1342         }
1343
1344         mutex_lock(&chan->lock);
1345         if (in_fifo) {
1346                 chan->in_fifo = in_fifo;
1347                 chan->read_data_ok = 1;
1348         }
1349         if (out_ep)
1350                 chan->out_ep = out_ep;
1351         mutex_unlock(&chan->lock);
1352
1353         if (in_fifo) {
1354                 u32 in_checkpoint = 0;
1355
1356                 if (!chan->in_synchronous)
1357                         in_checkpoint = in_fifo->size >>
1358                                 chan->in_log2_element_size;
1359
1360                 chan->in_consumed_bytes = 0;
1361                 chan->poll_used = 0;
1362                 chan->in_current_checkpoint = in_checkpoint;
1363                 rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
1364                                           OPCODE_SET_CHECKPOINT,
1365                                           in_checkpoint);
1366
1367                 if (rc) /* Failure guarantees that opcode wasn't sent */
1368                         goto unfifo;
1369
1370                 /*
1371                  * In non-blocking mode, request the FPGA to send any data it
1372                  * has right away. Otherwise, the first read() will always
1373                  * return -EAGAIN, which is OK strictly speaking, but ugly.
1374                  * Checking and unrolling if this fails isn't worth the
1375                  * effort -- the error is propagated to the first read()
1376                  * anyhow.
1377                  */
1378                 if (filp->f_flags & O_NONBLOCK)
1379                         request_read_anything(chan, OPCODE_SET_PUSH);
1380         }
1381
1382         return 0;
1383
1384 unfifo:
1385         chan->read_data_ok = 0;
1386         safely_assign_in_fifo(chan, NULL);
1387         fifo_mem_release(in_fifo);
1388         kfree(in_fifo);
1389
1390         if (out_ep) {
1391                 mutex_lock(&chan->lock);
1392                 chan->out_ep = NULL;
1393                 mutex_unlock(&chan->lock);
1394         }
1395
1396 late_unopen:
1397         if (out_ep)
1398                 endpoint_dealloc(out_ep);
1399
1400 unopen:
1401         mutex_lock(&chan->lock);
1402
1403         if (filp->f_mode & FMODE_READ)
1404                 chan->open_for_read = 0;
1405
1406         if (filp->f_mode & FMODE_WRITE)
1407                 chan->open_for_write = 0;
1408
1409         mutex_unlock(&chan->lock);
1410
1411         kref_put(&xdev->kref, cleanup_dev);
1412
1413         return rc;
1414
1415 unmutex_fail:
1416         mutex_unlock(&chan->lock);
1417         return rc;
1418 }
1419
1420 static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
1421                              size_t count, loff_t *f_pos)
1422 {
1423         struct xillyusb_channel *chan = filp->private_data;
1424         struct xillyusb_dev *xdev = chan->xdev;
1425         struct xillyfifo *fifo = chan->in_fifo;
1426         int chan_num = (chan->chan_idx << 1) | 1;
1427
1428         long deadline, left_to_sleep;
1429         int bytes_done = 0;
1430         bool sent_set_push = false;
1431         int rc;
1432
1433         deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
1434
1435         rc = mutex_lock_interruptible(&chan->in_mutex);
1436
1437         if (rc)
1438                 return rc;
1439
1440         while (1) {
1441                 u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
1442                 u32 complete_checkpoint, fifo_checkpoint;
1443                 u32 checkpoint;
1444                 s32 diff, leap;
1445                 unsigned int sh = chan->in_log2_element_size;
1446                 bool checkpoint_for_complete;
1447
1448                 rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
1449                                count - bytes_done, xilly_copy_to_user);
1450
1451                 if (rc < 0)
1452                         break;
1453
1454                 bytes_done += rc;
1455                 chan->in_consumed_bytes += rc;
1456
1457                 left_to_sleep = deadline - ((long)jiffies);
1458
1459                 /*
1460                  * Some 32-bit arithmetic that may wrap. Note that
1461                  * complete_checkpoint is rounded up to the next element
1462                  * boundary, because the read() can't be completed otherwise.
1463                  * fifo_checkpoint_bytes is rounded down, because it protects
1464                  * in_fifo from overflowing.
1465                  */
1466
1467                 fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
1468                 complete_checkpoint_bytes =
1469                         chan->in_consumed_bytes + count - bytes_done;
1470
1471                 fifo_checkpoint = fifo_checkpoint_bytes >> sh;
1472                 complete_checkpoint =
1473                         (complete_checkpoint_bytes + (1 << sh) - 1) >> sh;
1474
1475                 diff = (fifo_checkpoint - complete_checkpoint) << sh;
1476
1477                 if (chan->in_synchronous && diff >= 0) {
1478                         checkpoint = complete_checkpoint;
1479                         checkpoint_for_complete = true;
1480                 } else {
1481                         checkpoint = fifo_checkpoint;
1482                         checkpoint_for_complete = false;
1483                 }
1484
1485                 leap = (checkpoint - chan->in_current_checkpoint) << sh;
1486
1487                 /*
1488                  * To prevent flooding of OPCODE_SET_CHECKPOINT commands as
1489                  * data is consumed, it's issued only if it moves the
1490                  * checkpoint by at least an 8th of the FIFO's size, or if
1491                  * it's necessary to complete the number of bytes requested by
1492                  * the read() call.
1493                  *
1494                  * chan->read_data_ok is checked to spare an unnecessary
1495                  * submission after receiving EOF; however, it's harmless if
1496                  * one slips through.
1497                  */
1498
1499                 if (chan->read_data_ok &&
1500                     (leap > (fifo->size >> 3) ||
1501                      (checkpoint_for_complete && leap > 0))) {
1502                         chan->in_current_checkpoint = checkpoint;
1503                         rc = xillyusb_send_opcode(xdev, chan_num,
1504                                                   OPCODE_SET_CHECKPOINT,
1505                                                   checkpoint);
1506
1507                         if (rc)
1508                                 break;
1509                 }
1510
1511                 if (bytes_done == count ||
1512                     (left_to_sleep <= 0 && bytes_done))
1513                         break;
1514
1515                 /*
1516                  * Reaching here means that the FIFO was empty when
1517                  * fifo_read() returned, but not necessarily right now. Error
1518                  * and EOF are checked and reported only now, so that no data
1519                  * that managed its way to the FIFO is lost.
1520                  */
1521
1522                 if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
1523                         /* Has data slipped into the FIFO since fifo_read()? */
1524                         smp_rmb();
1525                         if (READ_ONCE(fifo->fill))
1526                                 continue;
1527
1528                         rc = 0;
1529                         break;
1530                 }
1531
1532                 if (xdev->error) {
1533                         rc = xdev->error;
1534                         break;
1535                 }
1536
1537                 if (filp->f_flags & O_NONBLOCK) {
1538                         rc = -EAGAIN;
1539                         break;
1540                 }
1541
1542                 if (!sent_set_push) {
1543                         rc = xillyusb_send_opcode(xdev, chan_num,
1544                                                   OPCODE_SET_PUSH,
1545                                                   complete_checkpoint);
1546
1547                         if (rc)
1548                                 break;
1549
1550                         sent_set_push = true;
1551                 }
1552
1553                 if (left_to_sleep > 0) {
1554                         /*
1555                          * Note that when xdev->error is set (e.g. when the
1556                          * device is unplugged), read_data_ok turns zero and
1557                          * fifo->waitq is awaken.
1558                          * Therefore no special attention to xdev->error.
1559                          */
1560
1561                         rc = wait_event_interruptible_timeout
1562                                 (fifo->waitq,
1563                                  fifo->fill || !chan->read_data_ok,
1564                                  left_to_sleep);
1565                 } else { /* bytes_done == 0 */
1566                         /* Tell FPGA to send anything it has */
1567                         rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);
1568
1569                         if (rc)
1570                                 break;
1571
1572                         rc = wait_event_interruptible
1573                                 (fifo->waitq,
1574                                  fifo->fill || !chan->read_data_ok);
1575                 }
1576
1577                 if (rc < 0) {
1578                         rc = -EINTR;
1579                         break;
1580                 }
1581         }
1582
1583         if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
1584             !READ_ONCE(fifo->fill))
1585                 request_read_anything(chan, OPCODE_SET_PUSH);
1586
1587         mutex_unlock(&chan->in_mutex);
1588
1589         if (bytes_done)
1590                 return bytes_done;
1591
1592         return rc;
1593 }
1594
1595 static int xillyusb_flush(struct file *filp, fl_owner_t id)
1596 {
1597         struct xillyusb_channel *chan = filp->private_data;
1598         int rc;
1599
1600         if (!(filp->f_mode & FMODE_WRITE))
1601                 return 0;
1602
1603         rc = mutex_lock_interruptible(&chan->out_mutex);
1604
1605         if (rc)
1606                 return rc;
1607
1608         /*
1609          * One second's timeout on flushing. Interrupts are ignored: if the
1610          * user pressed CTRL-C, that signal is probably still pending when we
1611          * reach here, and an interruptible wait would lose the chance to flush.
1612          */
1613         rc = flush_downstream(chan, HZ, false);
1614
1615         mutex_unlock(&chan->out_mutex);
1616
1617         if (rc == -ETIMEDOUT) {
1618                 /* This dance is just so dev_warn() can be used rather than pr_warn() */
1619                 struct xillyusb_dev *xdev = chan->xdev;
1620
1621                 mutex_lock(&chan->lock);
1622                 if (!xdev->error)
1623                         dev_warn(xdev->dev,
1624                                  "Timed out while flushing. Output data may be lost.\n");
1625                 mutex_unlock(&chan->lock);
1626         }
1627
1628         return rc;
1629 }
1630
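/*
 * write() copies data from userspace into the output endpoint's FIFO and
 * kicks off bulk OUT transfers. If the FIFO fills up, a partial write is
 * returned (or the call sleeps until there's room, in blocking mode). On
 * synchronous channels, each write also attempts to flush the data
 * downstream.
 */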
1631 static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
1632                               size_t count, loff_t *f_pos)
1633 {
1634         struct xillyusb_channel *chan = filp->private_data;
1635         struct xillyusb_dev *xdev = chan->xdev;
1636         struct xillyfifo *fifo = &chan->out_ep->fifo;
1637         int rc;
1638
1639         rc = mutex_lock_interruptible(&chan->out_mutex);
1640
1641         if (rc)
1642                 return rc;
1643
1644         while (1) {
1645                 if (xdev->error) {
1646                         rc = xdev->error;
1647                         break;
1648                 }
1649
1650                 if (count == 0)
1651                         break;
1652
1653                 rc = fifo_write(fifo, (__force void *)userbuf, count,
1654                                 xilly_copy_from_user);
1655
1656                 if (rc != 0)
1657                         break;
1658
1659                 if (filp->f_flags & O_NONBLOCK) {
1660                         rc = -EAGAIN;
1661                         break;
1662                 }
1663
1664                 if (wait_event_interruptible
1665                     (fifo->waitq,
1666                      fifo->fill != fifo->size || xdev->error)) {
1667                         rc = -EINTR;
1668                         break;
1669                 }
1670         }
1671
1672         if (rc < 0)
1673                 goto done;
1674
1675         chan->out_bytes += rc;
1676
1677         if (rc) {
1678                 try_queue_bulk_out(chan->out_ep);
1679                 chan->flushed = 0;
1680         }
1681
1682         if (chan->out_synchronous) {
1683                 int flush_rc = flush_downstream(chan, 0, true);
1684
1685                 if (flush_rc && !rc)
1686                         rc = flush_rc;
1687         }
1688
1689 done:
1690         mutex_unlock(&chan->out_mutex);
1691
1692         return rc;
1693 }
1694
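/*
 * release() tears down whatever the file descriptor's open() set up: for
 * each open direction an OPCODE_CLOSE is sent to the FPGA. On the read
 * side, the code waits for read_data_ok to be deasserted (EOF or device
 * error) before freeing the IN FIFO; on the write side, the bulk OUT
 * endpoint is quiesced and deallocated.
 */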
1695 static int xillyusb_release(struct inode *inode, struct file *filp)
1696 {
1697         struct xillyusb_channel *chan = filp->private_data;
1698         struct xillyusb_dev *xdev = chan->xdev;
1699         int rc_read = 0, rc_write = 0;
1700
1701         if (filp->f_mode & FMODE_READ) {
1702                 struct xillyfifo *in_fifo = chan->in_fifo;
1703
1704                 rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
1705                                                OPCODE_CLOSE, 0);
1706                 /*
1707                  * If rc_read is nonzero, xdev->error indicates a global
1708                  * device error. The error is reported later, so that
1709                  * resources are freed.
1710                  *
1711                  * Looping on wait_event_interruptible() defeats the purpose of
1712                  * an interruptible wait, and this should really have been
1713                  * wait_event(). However, the queue is woken with
1714                  * wake_up_interruptible() for the sake of its other users. If
1715                  * there's a global device error, chan->read_data_ok is
1716                  * deasserted and the wait queue is woken, so this is covered.
1717                  */
1718
1719                 while (wait_event_interruptible(in_fifo->waitq,
1720                                                 !chan->read_data_ok))
1721                         ; /* Empty loop */
1722
1723                 safely_assign_in_fifo(chan, NULL);
1724                 fifo_mem_release(in_fifo);
1725                 kfree(in_fifo);
1726
1727                 mutex_lock(&chan->lock);
1728                 chan->open_for_read = 0;
1729                 mutex_unlock(&chan->lock);
1730         }
1731
1732         if (filp->f_mode & FMODE_WRITE) {
1733                 struct xillyusb_endpoint *ep = chan->out_ep;
1734                 /*
1735                  * chan->flushing isn't zeroed. If the pre-release flush timed
1736                  * out, a cancel request will be sent before the next
1737                  * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
1738                  * This is done even though the FPGA forgets about the checkpoint
1739                  * request as the file closes. Still, in an exceptional race
1740                  * condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT
1741                  * just before closing, which would reach the host only after the
1742                  * file has been re-opened.
1743                  */
1744
1745                 mutex_lock(&chan->lock);
1746                 chan->out_ep = NULL;
1747                 mutex_unlock(&chan->lock);
1748
1749                 endpoint_quiesce(ep);
1750                 endpoint_dealloc(ep);
1751
1752                 /* See comments on rc_read above */
1753                 rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
1754                                                 OPCODE_CLOSE, 0);
1755
1756                 mutex_lock(&chan->lock);
1757                 chan->open_for_write = 0;
1758                 mutex_unlock(&chan->lock);
1759         }
1760
1761         kref_put(&xdev->kref, cleanup_dev);
1762
1763         return rc_read ? rc_read : rc_write;
1764 }
1765
1766 /*
1767  * Xillybus' API allows device nodes to be seekable, giving the user
1768  * application access to a RAM array on the FPGA (or logic emulating it).
1769  */
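/*
 * As a rough illustration of how a seekable channel might be used from
 * userspace (the device file name is hypothetical; real names are taken
 * from the IDT):
 *
 *   fd = open("/dev/xillyusb_mem", O_RDWR);
 *   lseek(fd, 0x100, SEEK_SET);  // seek to address 0x100 in the array
 *   read(fd, buf, 4);            // read 4 bytes from that address
 *
 * Seeks must land on an element boundary, or llseek() below fails with
 * -EINVAL.
 */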
1770
1771 static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
1772 {
1773         struct xillyusb_channel *chan = filp->private_data;
1774         struct xillyusb_dev *xdev = chan->xdev;
1775         loff_t pos = filp->f_pos;
1776         int rc = 0;
1777         unsigned int log2_element_size = chan->readable ?
1778                 chan->in_log2_element_size : chan->out_log2_element_size;
1779
1780         /*
1781          * Take both mutexes uninterruptibly, since common applications
1782          * don't seem to expect an -EINTR here. Besides, concurrent
1783          * access to a single file descriptor on a seekable device is a
1784          * mess anyhow.
1785          */
1786
1787         mutex_lock(&chan->out_mutex);
1788         mutex_lock(&chan->in_mutex);
1789
1790         switch (whence) {
1791         case SEEK_SET:
1792                 pos = offset;
1793                 break;
1794         case SEEK_CUR:
1795                 pos += offset;
1796                 break;
1797         case SEEK_END:
1798                 pos = offset; /* Going to the end => to the beginning */
1799                 break;
1800         default:
1801                 rc = -EINVAL;
1802                 goto end;
1803         }
1804
1805         /* In any case, we must finish on an element boundary */
1806         if (pos & ((1 << log2_element_size) - 1)) {
1807                 rc = -EINVAL;
1808                 goto end;
1809         }
1810
1811         rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
1812                                   OPCODE_SET_ADDR,
1813                                   pos >> log2_element_size);
1814
1815         if (rc)
1816                 goto end;
1817
1818         if (chan->writable) {
1819                 chan->flushed = 0;
1820                 rc = flush_downstream(chan, HZ, false);
1821         }
1822
1823 end:
1824         mutex_unlock(&chan->out_mutex);
1825         mutex_unlock(&chan->in_mutex);
1826
1827         if (rc) /* Return error after releasing mutexes */
1828                 return rc;
1829
1830         filp->f_pos = pos;
1831
1832         return pos;
1833 }
1834
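/*
 * poll() reports EPOLLIN when the IN FIFO holds data (or EOF has been
 * reached), EPOLLOUT when the OUT FIFO has room, and EPOLLERR on a global
 * device error. See the comments below for the deliberate exception
 * regarding synchronous read channels.
 */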
1835 static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
1836 {
1837         struct xillyusb_channel *chan = filp->private_data;
1838         __poll_t mask = 0;
1839
1840         if (chan->in_fifo)
1841                 poll_wait(filp, &chan->in_fifo->waitq, wait);
1842
1843         if (chan->out_ep)
1844                 poll_wait(filp, &chan->out_ep->fifo.waitq, wait);
1845
1846         /*
1847          * If this is the first time poll() is called, and the file is
1848          * readable, set the relevant flag. Also tell the FPGA to send all it
1849          * has, to kickstart the mechanism that ensures there's always some
1850          * data in in_fifo unless the stream is dry end-to-end. Note that the
1851          * first poll() may not return EPOLLIN, even if there's data on the
1852          * FPGA. Rather, the data will arrive soon, and wake up the relevant
1853          * wait queue.
1854          */
1855
1856         if (!chan->poll_used && chan->in_fifo) {
1857                 chan->poll_used = 1;
1858                 request_read_anything(chan, OPCODE_SET_PUSH);
1859         }
1860
1861         /*
1862          * poll() deliberately never reports readability on read() channels
1863          * which are synchronous. Allowing it would create situations where
1864          * data has been delivered at the FPGA, with users expecting select()
1865          * to wake up, which it may not. So make it never work.
1866          */
1867
1868         if (chan->in_fifo && !chan->in_synchronous &&
1869             (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
1870                 mask |= EPOLLIN | EPOLLRDNORM;
1871
1872         if (chan->out_ep &&
1873             (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
1874                 mask |= EPOLLOUT | EPOLLWRNORM;
1875
1876         if (chan->xdev->error)
1877                 mask |= EPOLLERR;
1878
1879         return mask;
1880 }
1881
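/* These operations back the device files registered by xillybus_init_chrdev() */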
1882 static const struct file_operations xillyusb_fops = {
1883         .owner      = THIS_MODULE,
1884         .read       = xillyusb_read,
1885         .write      = xillyusb_write,
1886         .open       = xillyusb_open,
1887         .flush      = xillyusb_flush,
1888         .release    = xillyusb_release,
1889         .llseek     = xillyusb_llseek,
1890         .poll       = xillyusb_poll,
1891 };
1892
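/*
 * Set up the two endpoints that exist independently of the IDT: the
 * message OUT endpoint, through which opcodes are sent to the FPGA
 * (8 kiB FIFO, fill mask enforcing the 8-byte message granularity), and
 * the bulk IN endpoint, on which upstream traffic arrives.
 */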
1893 static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
1894 {
1895         xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
1896                                       bulk_out_work, 1, 2);
1897         if (!xdev->msg_ep)
1898                 return -ENOMEM;
1899
1900         if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
1901                 goto dealloc;
1902
1903         xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */
1904
1905         xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
1906                                      bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
1907         if (!xdev->in_ep)
1908                 goto dealloc;
1909
1910         try_queue_bulk_in(xdev->in_ep);
1911
1912         return 0;
1913
1914 dealloc:
1915         endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
1916         xdev->msg_ep = NULL;
1917         return -ENOMEM;
1918 }
1919
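/*
 * Each channel is described by two 16-bit words in @chandesc, the first
 * for the upstream (read) direction and the second for the downstream
 * (write) direction. In each word, bit 7 marks the entry as valid, bit 6
 * as synchronous, bit 5 as seekable, bits 3:0 hold the log2 of the
 * element size, and bits 12:8 hold the log2 of the FPGA-side FIFO size
 * minus 16.
 *
 * For example, a hypothetical in_desc of 0x03e5 would describe a
 * readable, synchronous, seekable channel with 32-byte elements
 * (log2 = 5) and a 512 kiB FIFO on the FPGA (log2 = 3 + 16 = 19).
 */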
1920 static int setup_channels(struct xillyusb_dev *xdev,
1921                           __le16 *chandesc,
1922                           int num_channels)
1923 {
1924         struct xillyusb_channel *chan;
1925         int i;
1926
1927         chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
1928         if (!chan)
1929                 return -ENOMEM;
1930
1931         xdev->channels = chan;
1932
1933         for (i = 0; i < num_channels; i++, chan++) {
1934                 unsigned int in_desc = le16_to_cpu(*chandesc++);
1935                 unsigned int out_desc = le16_to_cpu(*chandesc++);
1936
1937                 chan->xdev = xdev;
1938                 mutex_init(&chan->in_mutex);
1939                 mutex_init(&chan->out_mutex);
1940                 mutex_init(&chan->lock);
1941                 init_waitqueue_head(&chan->flushq);
1942
1943                 chan->chan_idx = i;
1944
1945                 if (in_desc & 0x80) { /* Entry is valid */
1946                         chan->readable = 1;
1947                         chan->in_synchronous = !!(in_desc & 0x40);
1948                         chan->in_seekable = !!(in_desc & 0x20);
1949                         chan->in_log2_element_size = in_desc & 0x0f;
1950                         chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
1951                 }
1952
1953                 /*
1954                  * A downstream channel should never exist above index 13,
1955                  * as it would request a nonexistent BULK endpoint > 15.
1956                  * In the peculiar case that it does, it's ignored silently.
1957                  */
1958
1959                 if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
1960                         chan->writable = 1;
1961                         chan->out_synchronous = !!(out_desc & 0x40);
1962                         chan->out_seekable = !!(out_desc & 0x20);
1963                         chan->out_log2_element_size = out_desc & 0x0f;
1964                         chan->out_log2_fifo_size =
1965                                 ((out_desc >> 8) & 0x1f) + 16;
1966                 }
1967         }
1968
1969         return 0;
1970 }
1971
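/*
 * Discovery runs in two phases: first, the FPGA is quiesced and a single
 * fake asynchronous IN channel is set up, through which the IDT (the data
 * structure describing the device's streams) is fetched and CRC-checked.
 * Then the real channels are set up according to the IDT, and the device
 * files are registered with xillybus_init_chrdev().
 */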
1972 static int xillyusb_discovery(struct usb_interface *interface)
1973 {
1974         int rc;
1975         struct xillyusb_dev *xdev = usb_get_intfdata(interface);
1976         __le16 bogus_chandesc[2];
1977         struct xillyfifo idt_fifo;
1978         struct xillyusb_channel *chan;
1979         unsigned int idt_len, names_offset;
1980         unsigned char *idt;
1981         int num_channels;
1982
1983         rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
1984
1985         if (rc) {
1986                 dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
1987                 return rc;
1988         }
1989
1990         /* Phase I: Set up one fake upstream channel and obtain IDT */
1991
1992         /* Set up a fake IDT with one async IN stream */
1993         bogus_chandesc[0] = cpu_to_le16(0x80);
1994         bogus_chandesc[1] = cpu_to_le16(0);
1995
1996         rc = setup_channels(xdev, bogus_chandesc, 1);
1997
1998         if (rc)
1999                 return rc;
2000
2001         rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);
2002
2003         if (rc)
2004                 return rc;
2005
2006         chan = xdev->channels;
2007
2008         chan->in_fifo = &idt_fifo;
2009         chan->read_data_ok = 1;
2010
2011         xdev->num_channels = 1;
2012
2013         rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);
2014
2015         if (rc) {
2016                 dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
2017                 goto unfifo;
2018         }
2019
2020         rc = wait_event_interruptible_timeout(idt_fifo.waitq,
2021                                               !chan->read_data_ok,
2022                                               XILLY_RESPONSE_TIMEOUT);
2023
2024         if (xdev->error) {
2025                 rc = xdev->error;
2026                 goto unfifo;
2027         }
2028
2029         if (rc < 0) {
2030                 rc = -EINTR; /* Interrupt on probe method? Interesting. */
2031                 goto unfifo;
2032         }
2033
2034         if (chan->read_data_ok) {
2035                 rc = -ETIMEDOUT;
2036                 dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
2037                 goto unfifo;
2038         }
2039
2040         idt_len = READ_ONCE(idt_fifo.fill);
2041         idt = kmalloc(idt_len, GFP_KERNEL);
2042
2043         if (!idt) {
2044                 rc = -ENOMEM;
2045                 goto unfifo;
2046         }
2047
2048         fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);
2049
2050         if (crc32_le(~0, idt, idt_len) != 0) {
2051                 dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
2052                 rc = -ENODEV;
2053                 goto unidt;
2054         }
2055
2056         if (*idt > 0x90) {
2057                 dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
2058                         (int)*idt);
2059                 rc = -ENODEV;
2060                 goto unidt;
2061         }
2062
2063         /* Phase II: Set up the streams as defined in IDT */
2064
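        /*
         * IDT layout, as parsed below: byte 0 is the format version (up to
         * 0x90 is supported), bytes 1-2 hold the little-endian number of
         * channels, followed by four bytes of channel descriptors per
         * channel, then the names blob handed to xillybus_init_chrdev(),
         * and finally a 32-bit CRC.
         */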
2065         num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
2066         names_offset = 3 + num_channels * 4;
2067         idt_len -= 4; /* Exclude CRC */
2068
2069         if (idt_len < names_offset) {
2070                 dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
2071                 rc = -ENODEV;
2072                 goto unidt;
2073         }
2074
2075         rc = setup_channels(xdev, (void *)idt + 3, num_channels);
2076
2077         if (rc)
2078                 goto unidt;
2079
2080         /*
2081          * Except for wildly misbehaving hardware, or if it was disconnected
2082          * just after responding with the IDT, there is no reason for any
2083          * work item to be running now. To make sure the update of
2084          * xdev->channels is visible to anything that might run in parallel,
2085          * flush the workqueue, which rarely does anything.
2086          */
2087         flush_workqueue(xdev->workq);
2088
2089         xdev->num_channels = num_channels;
2090
2091         fifo_mem_release(&idt_fifo);
2092         kfree(chan);
2093
2094         rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
2095                                   THIS_MODULE, xdev,
2096                                   idt + names_offset,
2097                                   idt_len - names_offset,
2098                                   num_channels,
2099                                   xillyname, true);
2100
2101         kfree(idt);
2102
2103         return rc;
2104
2105 unidt:
2106         kfree(idt);
2107
2108 unfifo:
2109         safely_assign_in_fifo(chan, NULL);
2110         fifo_mem_release(&idt_fifo);
2111
2112         return rc;
2113 }
2114
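/*
 * probe() allocates the device structure and its workqueue, sets up the
 * message and bulk IN endpoints, and runs the discovery sequence that
 * registers the device files. Per-channel resources are allocated later,
 * when those files are opened.
 */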
2115 static int xillyusb_probe(struct usb_interface *interface,
2116                           const struct usb_device_id *id)
2117 {
2118         struct xillyusb_dev *xdev;
2119         int rc;
2120
2121         xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
2122         if (!xdev)
2123                 return -ENOMEM;
2124
2125         kref_init(&xdev->kref);
2126         mutex_init(&xdev->process_in_mutex);
2127         mutex_init(&xdev->msg_mutex);
2128
2129         xdev->udev = usb_get_dev(interface_to_usbdev(interface));
2130         xdev->dev = &interface->dev;
2131         xdev->error = 0;
2132         spin_lock_init(&xdev->error_lock);
2133         xdev->in_counter = 0;
2134         xdev->in_bytes_left = 0;
2135         xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
2136
2137         if (!xdev->workq) {
2138                 dev_err(&interface->dev, "Failed to allocate work queue\n");
2139                 rc = -ENOMEM;
2140                 goto fail;
2141         }
2142
2143         INIT_WORK(&xdev->wakeup_workitem, wakeup_all);
2144
2145         usb_set_intfdata(interface, xdev);
2146
2147         rc = xillyusb_setup_base_eps(xdev);
2148         if (rc)
2149                 goto fail;
2150
2151         rc = xillyusb_discovery(interface);
2152         if (rc)
2153                 goto latefail;
2154
2155         return 0;
2156
2157 latefail:
2158         endpoint_quiesce(xdev->in_ep);
2159         endpoint_quiesce(xdev->msg_ep);
2160
2161 fail:
2162         usb_set_intfdata(interface, NULL);
2163         kref_put(&xdev->kref, cleanup_dev);
2164         return rc;
2165 }
2166
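/*
 * disconnect() also runs on an ordinary driver unbind (soft_unbind is
 * set), so it first attempts a polite OPCODE_QUIESCE, then marks the
 * device as errored, quiesces all endpoints to kill outstanding URBs,
 * and drops the reference taken by probe().
 */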
2167 static void xillyusb_disconnect(struct usb_interface *interface)
2168 {
2169         struct xillyusb_dev *xdev = usb_get_intfdata(interface);
2170         struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
2171         struct xillyfifo *fifo = &msg_ep->fifo;
2172         int rc;
2173         int i;
2174
2175         xillybus_cleanup_chrdev(xdev, &interface->dev);
2176
2177         /*
2178          * Try to send OPCODE_QUIESCE, which will fail silently if the device
2179          * was disconnected, but makes sense on module unload.
2180          */
2181
2182         msg_ep->wake_on_drain = true;
2183         xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
2184
2185         /*
2186          * If the device has been disconnected, sending the opcode sets
2187          * a global device error in xdev->error, if such an error didn't
2188          * occur earlier. Hence timing out means that the USB link is fine,
2189          * but somehow the message wasn't sent. This should never happen.
2190          */
2191
2192         rc = wait_event_interruptible_timeout(fifo->waitq,
2193                                               msg_ep->drained || xdev->error,
2194                                               XILLY_RESPONSE_TIMEOUT);
2195
2196         if (!rc)
2197                 dev_err(&interface->dev,
2198                         "Weird timeout condition on sending quiesce request.\n");
2199
2200         report_io_error(xdev, -ENODEV); /* Discourage further activity */
2201
2202         /*
2203          * This device driver is declared with soft_unbind set, or else
2204          * sending OPCODE_QUIESCE above would always fail. The price is
2205          * that the USB framework hasn't killed outstanding URBs, so this
2206          * has to be done explicitly before returning from this call.
2207          */
2208
2209         for (i = 0; i < xdev->num_channels; i++) {
2210                 struct xillyusb_channel *chan = &xdev->channels[i];
2211
2212                 /*
2213                  * The lock is taken to prevent chan->out_ep from changing. It
2214                  * also ensures that xillyusb_open() and xillyusb_flush() don't
2215                  * access xdev->dev after it is nullified below.
2216                  */
2217                 mutex_lock(&chan->lock);
2218                 if (chan->out_ep)
2219                         endpoint_quiesce(chan->out_ep);
2220                 mutex_unlock(&chan->lock);
2221         }
2222
2223         endpoint_quiesce(xdev->in_ep);
2224         endpoint_quiesce(xdev->msg_ep);
2225
2226         usb_set_intfdata(interface, NULL);
2227
2228         xdev->dev = NULL;
2229
2230         kref_put(&xdev->kref, cleanup_dev);
2231 }
2232
2233 static struct usb_driver xillyusb_driver = {
2234         .name = xillyname,
2235         .id_table = xillyusb_table,
2236         .probe = xillyusb_probe,
2237         .disconnect = xillyusb_disconnect,
2238         .soft_unbind = 1,
2239 };
2240
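/*
 * fifo_buf_order expresses a buffer of 2^LOG2_INITIAL_FIFO_BUF_SIZE bytes
 * as a page allocation order (zero if a single page is enough). For
 * example, with 64 kiB buffers and 4 kiB pages (PAGE_SHIFT == 12), the
 * order works out to 16 - 12 = 4, i.e. 16 contiguous pages per buffer.
 */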
2241 static int __init xillyusb_init(void)
2242 {
2243         int rc = 0;
2244
2245         if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
2246                 fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
2247         else
2248                 fifo_buf_order = 0;
2249
2250         rc = usb_register(&xillyusb_driver);
2251
2252         return rc;
2253 }
2254
2255 static void __exit xillyusb_exit(void)
2256 {
2257         usb_deregister(&xillyusb_driver);
2258 }
2259
2260 module_init(xillyusb_init);
2261 module_exit(xillyusb_exit);