/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME     "gr_udc"
#define DRIVER_DESC     "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
        ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)                                        \
        ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
         GR_EPCTRL_BUFSZ_SCALER)
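
/*
 * Worked example: if the BUFSZ field extracted from epctrl holds n, the
 * endpoint's hardware buffer is n * GR_EPCTRL_BUFSZ_SCALER bytes, with the
 * scaler value defined in gr_udc.h.
 */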

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
        static const char *const names[] = {
                [GR_EP0_DISCONNECT] = "disconnect",
                [GR_EP0_SETUP] = "setup",
                [GR_EP0_IDATA] = "idata",
                [GR_EP0_ODATA] = "odata",
                [GR_EP0_ISTATUS] = "istatus",
                [GR_EP0_OSTATUS] = "ostatus",
                [GR_EP0_STALL] = "stall",
                [GR_EP0_SUSPEND] = "suspend",
        };

        if (state < 0 || state >= ARRAY_SIZE(names))
                return "UNKNOWN";

        return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req)
{
        int buflen = ep->is_in ? req->req.length : req->req.actual;
        int rowlen = 32;
        int plen = min(rowlen, buflen);

        dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
                (buflen > plen ? " (truncated)" : ""));
        print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
                             rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length)
{
        dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
                 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */
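
/*
 * VERBOSE_DEBUG is not defined in this file; the non-empty versions of the
 * printout helpers above are typically compiled in from the build system,
 * e.g. via CONFIG_USB_GADGET_VERBOSE, which is expected to add
 * -DVERBOSE_DEBUG to ccflags in the gadget Makefile.
 */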

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
        u32 epctrl = gr_read32(&ep->regs->epctrl);
        u32 epstat = gr_read32(&ep->regs->epstat);
        int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
        struct gr_request *req;

        seq_printf(seq, "%s:\n", ep->ep.name);
        seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
        seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
        seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
        seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
        seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
        seq_printf(seq, "  stopped = %d\n", ep->stopped);
        seq_printf(seq, "  wedged = %d\n", ep->wedged);
        seq_printf(seq, "  callback = %d\n", ep->callback);
        seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
        seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
        seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
        if (mode == 1 || mode == 3)
                seq_printf(seq, "  nt = %d\n",
                           (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

        seq_printf(seq, "  Buffer 0: %s %s%d\n",
                   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? " " : "selected ",
                   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
        seq_printf(seq, "  Buffer 1: %s %s%d\n",
                   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? "selected " : " ",
                   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

        if (list_empty(&ep->queue)) {
                seq_puts(seq, "  Queue: empty\n\n");
                return;
        }

        seq_puts(seq, "  Queue:\n");
        list_for_each_entry(req, &ep->queue, queue) {
                struct gr_dma_desc *desc;
                struct gr_dma_desc *next;

                seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
                           &req->req.buf, req->req.actual, req->req.length);

                next = req->first_desc;
                do {
                        desc = next;
                        next = desc->next_desc;
                        seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
                                   desc == req->curr_desc ? 'c' : ' ',
                                   desc, desc->paddr, desc->ctrl, desc->data);
                } while (desc != req->last_desc);
        }
        seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
        struct gr_udc *dev = seq->private;
        u32 control = gr_read32(&dev->regs->control);
        u32 status = gr_read32(&dev->regs->status);
        struct gr_ep *ep;

        seq_printf(seq, "usb state = %s\n",
                   usb_state_string(dev->gadget.state));
        seq_printf(seq, "address = %d\n",
                   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
        seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
        seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
        seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
        seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
        seq_printf(seq, "test_mode = %d\n", dev->test_mode);
        seq_puts(seq, "\n");

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_seq_ep_show(seq, ep);

        return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
        .owner          = THIS_MODULE,
        .open           = gr_dfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
        const char *name = "gr_udc_state";

        dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
        dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
                                             &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
        /* Handles NULL and ERR pointers internally */
        debugfs_remove(dev->dfs_state);
        debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */
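
/*
 * With CONFIG_USB_GADGET_DEBUG_FS enabled and debugfs mounted at the usual
 * location, the file created above can be inspected with e.g.:
 *
 *      cat /sys/kernel/debug/<device name>/gr_udc_state
 *
 * where <device name> is whatever dev_name(dev->dev) returned when
 * gr_dfs_create() ran.
 */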

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
        dma_addr_t paddr;
        struct gr_dma_desc *dma_desc;

        dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
        if (!dma_desc) {
                dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
                return NULL;
        }

        dma_desc->paddr = paddr;

        return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
                                    struct gr_dma_desc *desc)
{
        dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
        struct gr_dma_desc *desc;
        struct gr_dma_desc *next;

        next = req->first_desc;
        if (!next)
                return;

        do {
                desc = next;
                next = desc->next_desc;
                gr_free_dma_desc(dev, desc);
        } while (desc != req->last_desc);

        req->first_desc = NULL;
        req->curr_desc = NULL;
        req->last_desc = NULL;
}
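
/*
 * The per-request descriptor chain handled above is a singly linked list:
 *
 *      req->first_desc -> ... -> req->last_desc
 *
 * Each gr_dma_desc carries both a CPU-side link (next_desc), used by the
 * walking loops in this file, and a DMA-side link (next, holding the paddr
 * of the following descriptor) consumed by the hardware. req->curr_desc
 * points at the descriptor whose address is handed to the DMA engine next.
 */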

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
                              int status)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        struct gr_udc *dev;

        list_del_init(&req->queue);

        if (likely(req->req.status == -EINPROGRESS))
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
        gr_free_dma_desc_chain(dev, req);

        if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
                req->req.actual = req->req.length;
        } else if (req->oddlen && req->req.actual > req->evenlen) {
                /*
                 * Copy to user buffer in this case where length was not evenly
                 * divisible by ep->ep.maxpacket and the last descriptor was
                 * actually used.
                 */
                char *buftail = ((char *)req->req.buf + req->evenlen);

                memcpy(buftail, ep->tailbuf, req->oddlen);

                if (req->req.actual > req->req.length) {
                        /* We got more data than was requested */
                        dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
                                ep->ep.name);
                        gr_dbgprint_request("OVFL", ep, req);
                        req->req.status = -EOVERFLOW;
                }
        }

        if (!status) {
                if (ep->is_in)
                        gr_dbgprint_request("SENT", ep, req);
                else
                        gr_dbgprint_request("RECV", ep, req);
        }

        /* Prevent changes to ep->queue during callback */
        ep->callback = 1;
        if (req == dev->ep0reqo && !status) {
                if (req->setup)
                        gr_ep0_setup(dev, req);
                else
                        dev_err(dev->dev,
                                "Unexpected non setup packet on ep0out\n");
        } else if (req->req.complete) {
                spin_unlock(&dev->lock);

                usb_gadget_giveback_request(&ep->ep, &req->req);

                spin_lock(&dev->lock);
        }
        ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct gr_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}
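
/*
 * Note that gr_alloc_request() is normally reached through the endpoint ops
 * registered by this driver, i.e. a gadget driver calls
 * usb_ep_alloc_request() rather than this function directly.
 */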

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
        struct gr_request *req;
        u32 dmactrl;

        if (list_empty(&ep->queue)) {
                ep->dma_start = 0;
                return;
        }

        req = list_first_entry(&ep->queue, struct gr_request, queue);

        /* A descriptor should already have been allocated */
        BUG_ON(!req->curr_desc);

        /*
         * The DMA controller can not handle smaller OUT buffers than
         * ep->ep.maxpacket. It could lead to buffer overruns if an
         * unexpectedly long packet is received. Therefore an internal bounce
         * buffer is used when such a request is started.
         */
        if (!ep->is_in && req->oddlen)
                req->last_desc->data = ep->tailbuf_paddr;

        wmb(); /* Make sure all is settled before handing it over to DMA */

        /* Set the descriptor pointer in the hardware */
        gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

        /* Announce available descriptors */
        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

        ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        gr_finish_request(ep, req, status);
        gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
        u32 dmactrl;

        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
                           dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
        struct gr_dma_desc *desc;

        desc = gr_alloc_dma_desc(ep, gfp_flags);
        if (!desc)
                return -ENOMEM;

        desc->data = data;
        if (ep->is_in)
                desc->ctrl =
                        (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
        else
                desc->ctrl = GR_DESC_OUT_CTRL_IE;

        if (!req->first_desc) {
                req->first_desc = desc;
                req->curr_desc = desc;
        } else {
                req->last_desc->next_desc = desc;
                req->last_desc->next = desc->paddr;
                req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
        }
        req->last_desc = desc;

        return 0;
}

/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
                                  gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left to provide descriptors for */
        u16 bytes_used; /* Bytes accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        while (bytes_left > 0) {
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                if (size < ep->bytes_per_buffer) {
                        /* Prepare using bounce buffer */
                        req->evenlen = req->req.length - bytes_left;
                        req->oddlen = size;
                }

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        }

        req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
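
/*
 * Worked example (OUT): with ep->bytes_per_buffer == 64 and
 * req->req.length == 150, the loop above creates descriptors covering
 * 64 + 64 + 22 bytes. The last size (22) is smaller than bytes_per_buffer,
 * so evenlen becomes 128 and oddlen 22, and gr_start_dma() will point the
 * last descriptor at the bounce buffer instead of the request buffer.
 */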

/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware
 * splits this up into several payloads automatically. Moreover,
 * ep->bytes_per_buffer is always set to a multiple of the maximum payload
 * (restricted to the valid number of maximum payloads during high bandwidth
 * isochronous or interrupt transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
                                 gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left in req to provide descriptors for */
        u16 bytes_used; /* Bytes in req accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        do { /* Allow for zero length packets */
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        } while (bytes_left > 0);

        /*
         * Send an extra zero length packet to indicate that no more data is
         * available when req->req.zero is set and the data length is an even
         * multiple of ep->ep.maxpacket.
         */
        if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
                ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
                if (ret)
                        goto alloc_err;
        }

        /*
         * For IN packets we only want to know when the last packet has been
         * transmitted (not just put into internal buffers).
         */
        req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
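
/*
 * Worked example (IN): with ep->bytes_per_buffer == 512, a 1024 byte request
 * gets two 512 byte descriptors. If req->req.zero is set and ep->ep.maxpacket
 * is 512, the length is an even multiple of maxpacket, so a third,
 * zero-length descriptor is appended to make the host see a short packet
 * that terminates the transfer.
 */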

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
        struct gr_udc *dev = ep->dev;
        int ret;

        if (unlikely(!ep->ep.desc && ep->num != 0)) {
                dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
                return -EINVAL;
        }

        if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
                dev_err(dev->dev,
                        "Invalid request for %s: buf=%p list_empty=%d\n",
                        ep->ep.name, req->req.buf, list_empty(&req->queue));
                return -EINVAL;
        }

        if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
                dev_err(dev->dev, "-ESHUTDOWN");
                return -ESHUTDOWN;
        }

        /* Can't touch registers when suspended */
        if (dev->ep0state == GR_EP0_SUSPEND) {
                dev_err(dev->dev, "-EBUSY");
                return -EBUSY;
        }

        /* Set up DMA mapping in case the caller didn't */
        ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
        if (ret) {
                dev_err(dev->dev, "usb_gadget_map_request");
                return ret;
        }

        if (ep->is_in)
                ret = gr_setup_in_desc_list(ep, req, gfp_flags);
        else
                ret = gr_setup_out_desc_list(ep, req, gfp_flags);
        if (ret)
                return ret;

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        list_add_tail(&req->queue, &ep->queue);

        /* Start DMA if not started, otherwise interrupt handler handles it */
        if (!ep->dma_start && likely(!ep->stopped))
                gr_start_dma(ep);

        return 0;
}
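
/*
 * Sketch of how a gadget driver typically ends up in gr_queue() (the names
 * buf, len and my_complete are placeholders):
 *
 *      req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *      req->buf = buf;
 *      req->length = len;
 *      req->complete = my_complete;
 *      usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() calls the queue op that this driver registers elsewhere,
 * which takes dev->lock and calls gr_queue().
 */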

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
                               gfp_t gfp_flags)
{
        if (ep->is_in)
                gr_dbgprint_request("RESP", ep, req);

        return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
        struct gr_request *req;

        ep->stopped = 1;
        ep->dma_start = 0;
        gr_abort_dma(ep);

        while (!list_empty(&ep->queue)) {
                req = list_first_entry(&ep->queue, struct gr_request, queue);
                gr_finish_request(ep, req, -ESHUTDOWN);
        }
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
        gr_write32(&ep->regs->epctrl, 0);
        gr_write32(&ep->regs->dmactrl, 0);

        ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
        ep->ep.desc = NULL;
        ep->stopped = 1;
        ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
        u32 epctrl;

        epctrl = gr_read32(&dev->epo[0].regs->epctrl);
        gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
        epctrl = gr_read32(&dev->epi[0].regs->epctrl);
        gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

        dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
        u32 epctrl;
        int retval = 0;

        if (ep->num && !ep->ep.desc)
                return -EINVAL;

        if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
                return -EOPNOTSUPP;

        /* Never actually halt ep0, and therefore never clear halt for ep0 */
        if (!ep->num) {
                if (halt && !fromhost) {
                        /* ep0 halt from gadget - generate protocol stall */
                        gr_control_stall(ep->dev);
                        dev_dbg(ep->dev->dev, "EP: stall ep0\n");
                        return 0;
                }
                return -EINVAL;
        }

        dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
                (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

        epctrl = gr_read32(&ep->regs->epctrl);
        if (halt) {
                /* Set HALT */
                gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
                ep->stopped = 1;
                if (wedge)
                        ep->wedged = 1;
        } else {
                gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
                ep->stopped = 0;
                ep->wedged = 0;

                /* Things might have been queued up in the meantime */
                if (!ep->dma_start)
                        gr_start_dma(ep);
        }

        return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
        if (dev->ep0state != value)
                dev_vdbg(dev->dev, "STATE:  ep0state=%s\n",
                         gr_ep0state_string(value));
        dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
        gr_write32(&dev->regs->control, 0);
        wmb(); /* Make sure that we do not deny one of our interrupts */
        dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
        struct gr_ep *ep;

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_ep_nuke(ep);

        gr_disable_interrupts_and_pullup(dev);

        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
        usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
                                     struct usb_request *_req)
{
        struct gr_ep *ep;
        struct gr_udc *dev;
        u32 control;

        ep = container_of(_ep, struct gr_ep, ep);
        dev = ep->dev;

        spin_lock(&dev->lock);

        control = gr_read32(&dev->regs->control);
        control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
        gr_write32(&dev->regs->control, control);

        spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
        /* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
                          void (*complete)(struct usb_ep *ep,
                                           struct usb_request *req))
{
        u8 *reqbuf = dev->ep0reqi->req.buf;
        int status;
        int i;

        for (i = 0; i < length; i++)
                reqbuf[i] = buf[i];
        dev->ep0reqi->req.length = length;
        dev->ep0reqi->req.complete = complete;

        status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
        if (status < 0)
                dev_err(dev->dev,
                        "Could not queue ep0in setup response: %d\n", status);

        return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
        __le16 le_response = cpu_to_le16(response);

        return gr_ep0_respond(dev, (u8 *)&le_response, 2,
                              gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
        return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
        u32 control;

        control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
        control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
        control |= GR_CONTROL_SU;
        gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
                             u16 value, u16 index)
{
        u16 response;
        u8 test;

        switch (request) {
        case USB_REQ_SET_ADDRESS:
                dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
                gr_set_address(dev, value & 0xff);
                if (value)
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                else
                        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
                return gr_ep0_respond_empty(dev);

        case USB_REQ_GET_STATUS:
                /* Self powered | remote wakeup */
                response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
                return gr_ep0_respond_u16(dev, response);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Allow remote wakeup */
                        dev->remote_wakeup = 1;
                        return gr_ep0_respond_empty(dev);

                case USB_DEVICE_TEST_MODE:
                        /* The hardware does not support TEST_FORCE_EN */
                        test = index >> 8;
                        if (test >= TEST_J && test <= TEST_PACKET) {
                                dev->test_mode = test;
                                return gr_ep0_respond(dev, NULL, 0,
                                                      gr_ep0_testmode_complete);
                        }
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Disallow remote wakeup */
                        dev->remote_wakeup = 0;
                        return gr_ep0_respond_empty(dev);
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
                                u16 value, u16 index)
{
        if (dev->gadget.state != USB_STATE_CONFIGURED)
                return -1;

        /*
         * Should return STALL for invalid interfaces, but the udc driver does
         * not know anything about that. However, many gadget drivers do not
         * handle GET_STATUS so we need to take care of that.
         */

        switch (request) {
        case USB_REQ_GET_STATUS:
                return gr_ep0_respond_u16(dev, 0x0000);

        case USB_REQ_SET_FEATURE:
        case USB_REQ_CLEAR_FEATURE:
                /*
                 * No possible valid standard requests. Still let gadget drivers
                 * have a go at it.
                 */
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index)
{
        struct gr_ep *ep;
        int status;
        int halted;
        u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
        u8 is_in = index & USB_ENDPOINT_DIR_MASK;

        if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
                return -1;

        if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
                return -1;

        ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

        switch (request) {
        case USB_REQ_GET_STATUS:
                halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
                return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        status = gr_ep_halt_wedge(ep, 1, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        if (ep->wedged)
                                return -1;
                        status = gr_ep_halt_wedge(ep, 0, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
        int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

        if (ret)
                dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
                        ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        union {
                struct usb_ctrlrequest ctrl;
                u8 raw[8];
                u32 word[2];
        } u;
        u8 type;
        u8 request;
        u16 value;
        u16 index;
        u16 length;
        int i;
        int status;

        /* Restore from ep0 halt */
        if (dev->ep0state == GR_EP0_STALL) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (!req->req.actual)
                        goto out;
        }

        if (dev->ep0state == GR_EP0_ISTATUS) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (req->req.actual > 0)
                        dev_dbg(dev->dev,
                                "Unexpected setup packet at state %s\n",
                                gr_ep0state_string(GR_EP0_ISTATUS));
                else
                        goto out; /* Got expected ZLP */
        } else if (dev->ep0state != GR_EP0_SETUP) {
                dev_info(dev->dev,
                         "Unexpected ep0out request at state %s - stalling\n",
                         gr_ep0state_string(dev->ep0state));
                gr_control_stall(dev);
                gr_set_ep0state(dev, GR_EP0_SETUP);
                goto out;
        } else if (!req->req.actual) {
                dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
                        gr_ep0state_string(dev->ep0state));
                goto out;
        }

        /* Handle SETUP packet */
        for (i = 0; i < req->req.actual; i++)
                u.raw[i] = ((u8 *)req->req.buf)[i];

        type = u.ctrl.bRequestType;
        request = u.ctrl.bRequest;
        value = le16_to_cpu(u.ctrl.wValue);
        index = le16_to_cpu(u.ctrl.wIndex);
        length = le16_to_cpu(u.ctrl.wLength);

        gr_dbgprint_devreq(dev, type, request, value, index, length);

        /* Check for data stage */
        if (length) {
                if (type & USB_DIR_IN)
                        gr_set_ep0state(dev, GR_EP0_IDATA);
                else
                        gr_set_ep0state(dev, GR_EP0_ODATA);
        }

        status = 1; /* Positive status flags delegation */
        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (type & USB_RECIP_MASK) {
                case USB_RECIP_DEVICE:
                        status = gr_device_request(dev, type, request,
                                                   value, index);
                        break;
                case USB_RECIP_ENDPOINT:
                        status = gr_endpoint_request(dev, type, request,
                                                     value, index);
                        break;
                case USB_RECIP_INTERFACE:
                        status = gr_interface_request(dev, type, request,
                                                      value, index);
                        break;
                }
        }

        if (status > 0) {
                spin_unlock(&dev->lock);

                dev_vdbg(dev->dev, "DELEGATE\n");
                status = dev->driver->setup(&dev->gadget, &u.ctrl);

                spin_lock(&dev->lock);
        }

        /* Generate STALL on both ep0out and ep0in if requested */
        if (unlikely(status < 0)) {
                dev_vdbg(dev->dev, "STALL\n");
                gr_control_stall(dev);
        }

        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
            request == USB_REQ_SET_CONFIGURATION) {
                if (!value) {
                        dev_dbg(dev->dev, "STATUS: deconfigured\n");
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                } else if (status >= 0) {
                        /* Not configured unless the gadget OKs it */
                        dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
                        usb_gadget_set_state(&dev->gadget,
                                             USB_STATE_CONFIGURED);
                }
        }

        /* Get ready for next stage */
        if (dev->ep0state == GR_EP0_ODATA)
                gr_set_ep0state(dev, GR_EP0_OSTATUS);
        else if (dev->ep0state == GR_EP0_IDATA)
                gr_set_ep0state(dev, GR_EP0_ISTATUS);
        else
                gr_set_ep0state(dev, GR_EP0_SETUP);

out:
        gr_ep0out_requeue(dev);
}
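
/*
 * Summary of the ep0 state flow driven by gr_ep0_setup():
 *
 *      SETUP -> IDATA -> ISTATUS -> SETUP      (control read)
 *      SETUP -> ODATA -> OSTATUS -> SETUP      (control write)
 *      SETUP -> SETUP                          (no data stage)
 *
 * The ISTATUS -> SETUP step is taken above when the expected status ZLP
 * arrives, the OSTATUS -> SETUP step in gr_handle_out_ep(), and STALL is
 * entered whenever a request fails or arrives unexpectedly.
 */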

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
        u32 control;

        dev->gadget.speed = GR_SPEED(status);
        usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

        /* Turn on full interrupts and pullup */
        control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
                   GR_CONTROL_SP | GR_CONTROL_EP);
        gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
        u32 status;

        dev->irq_enabled = 1;
        wmb(); /* Make sure we do not ignore an interrupt */
        gr_write32(&dev->regs->control, GR_CONTROL_VI);

        /* Take care of the case we are already plugged in at this point */
        status = gr_read32(&dev->regs->status);
        if (status & GR_STATUS_VB)
                gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
        gr_stop_activity(dev);

        /* Report disconnect */
        if (dev->driver && dev->driver->disconnect) {
                spin_unlock(&dev->lock);

                dev->driver->disconnect(&dev->gadget);

                spin_lock(&dev->lock);
        }

        gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
        gr_set_address(dev, 0);
        gr_set_ep0state(dev, GR_EP0_SETUP);
        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
        dev->gadget.speed = GR_SPEED(status);

        gr_ep_nuke(&dev->epo[0]);
        gr_ep_nuke(&dev->epi[0]);
        dev->epo[0].stopped = 0;
        dev->epi[0].stopped = 0;
        gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->last_desc)
                return 0;

        if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
                return 0; /* Not put in hardware buffers yet */

        if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
                return 0; /* Not transmitted yet, still in hardware buffers */

        /* Write complete */
        gr_dma_advance(ep, 0);

        return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
        u32 ep_dmactrl;
        u32 ctrl;
        u16 len;
        struct gr_request *req;
        struct gr_udc *dev = ep->dev;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->curr_desc)
                return 0;

        ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
        if (ctrl & GR_DESC_OUT_CTRL_EN)
                return 0; /* Not received yet */

        /* Read complete */
        len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
        req->req.actual += len;
        if (ctrl & GR_DESC_OUT_CTRL_SE)
                req->setup = 1;

        if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
                /* Short packet or >= expected size - we are done */

                if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
                        /*
                         * Send a status stage ZLP to ack the DATA stage in the
                         * OUT direction. This needs to be done before
                         * gr_dma_advance as that can lead to a call to
                         * ep0_setup that can change dev->ep0state.
                         */
                        gr_ep0_respond_empty(dev);
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                }

                gr_dma_advance(ep, 0);
        } else {
                /* Not done yet. Enable the next descriptor to receive more. */
                req->curr_desc = req->curr_desc->next_desc;
                req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

                ep_dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
        }

        return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
        u32 status = gr_read32(&dev->regs->status);
        int handled = 0;
        int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
                         dev->gadget.state == USB_STATE_ATTACHED);

        /* VBUS valid detected */
        if (!powstate && (status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
                gr_vbus_connected(dev, status);
                handled = 1;
        }

        /* Disconnect */
        if (powstate && !(status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
                gr_vbus_disconnected(dev);
                handled = 1;
        }

        /* USB reset detected */
        if (status & GR_STATUS_UR) {
                dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
                        GR_SPEED_STR(status));
                gr_write32(&dev->regs->status, GR_STATUS_UR);
                gr_udc_usbreset(dev, status);
                handled = 1;
        }

        /* Speed change */
        if (dev->gadget.speed != GR_SPEED(status)) {
                dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
                        GR_SPEED_STR(status));
                dev->gadget.speed = GR_SPEED(status);
                handled = 1;
        }

        /* Going into suspend */
        if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB suspend\n");
                gr_set_ep0state(dev, GR_EP0_SUSPEND);
                dev->suspended_from = dev->gadget.state;
                usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->suspend) {
                        spin_unlock(&dev->lock);

                        dev->driver->suspend(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        /* Coming out of suspend */
        if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB resume\n");
                if (dev->suspended_from == USB_STATE_POWERED)
                        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
                else
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                usb_gadget_set_state(&dev->gadget, dev->suspended_from);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->resume) {
                        spin_unlock(&dev->lock);

                        dev->driver->resume(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;
        struct gr_ep *ep;
        int handled = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        if (!dev->irq_enabled)
                goto out;

        /*
         * Check IN ep interrupts. We check these before the OUT eps because
         * some gadget drivers reuse a request that might already be
         * outstanding and need it to be completed (mainly setup requests).
         */
        for (i = 0; i < dev->nepi; i++) {
                ep = &dev->epi[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_in_ep(ep) || handled;
        }

        /* Check OUT ep interrupts */
        for (i = 0; i < dev->nepo; i++) {
                ep = &dev->epo[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_out_ep(ep) || handled;
        }

        /* Check status interrupts */
        handled = gr_handle_state_changes(dev) || handled;

        /*
         * Check AMBA DMA errors. Only check if we didn't find anything else to
         * handle because this shouldn't happen if we did everything right.
         */
        if (!handled) {
                list_for_each_entry(ep, &dev->ep_list, ep_list) {
                        if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
                                dev_err(dev->dev,
                                        "AMBA Error occurred for %s\n",
                                        ep->ep.name);
                                handled = 1;
                        }
                }
        }

out:
        spin_unlock_irqrestore(&dev->lock, flags);

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;

        if (!dev->irq_enabled)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}
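
/*
 * gr_irq() and gr_irq_handler() form a threaded interrupt pair. A sketch of
 * how they would be registered (the actual probe code lies outside this
 * excerpt):
 *
 *      err = devm_request_threaded_irq(dev->dev, irq, gr_irq,
 *                                      gr_irq_handler, IRQF_SHARED,
 *                                      driver_name, dev);
 *
 * The hard handler only checks irq_enabled and wakes the thread, which then
 * does the real work under dev->lock.
 */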
1480
1481 /* ---------------------------------------------------------------------- */
1482 /* USB ep ops */
1483
1484 /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
1485 static int gr_ep_enable(struct usb_ep *_ep,
1486                         const struct usb_endpoint_descriptor *desc)
1487 {
1488         struct gr_udc *dev;
1489         struct gr_ep *ep;
1490         u8 mode;
1491         u8 nt;
1492         u16 max;
1493         u16 buffer_size = 0;
1494         u32 epctrl;
1495
1496         ep = container_of(_ep, struct gr_ep, ep);
1497         if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1498                 return -EINVAL;
1499
1500         dev = ep->dev;
1501
1502         /* 'ep0' IN and OUT are reserved */
1503         if (ep == &dev->epo[0] || ep == &dev->epi[0])
1504                 return -EINVAL;
1505
1506         if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1507                 return -ESHUTDOWN;
1508
1509         /* Make sure we are clear for enabling */
1510         epctrl = gr_read32(&ep->regs->epctrl);
1511         if (epctrl & GR_EPCTRL_EV)
1512                 return -EBUSY;
1513
1514         /* Check that directions match */
1515         if (!ep->is_in != !usb_endpoint_dir_in(desc))
1516                 return -EINVAL;
1517
1518         /* Check ep num */
1519         if ((!ep->is_in && ep->num >= dev->nepo) ||
1520             (ep->is_in && ep->num >= dev->nepi))
1521                 return -EINVAL;
1522
1523         if (usb_endpoint_xfer_control(desc)) {
1524                 mode = 0;
1525         } else if (usb_endpoint_xfer_isoc(desc)) {
1526                 mode = 1;
1527         } else if (usb_endpoint_xfer_bulk(desc)) {
1528                 mode = 2;
1529         } else if (usb_endpoint_xfer_int(desc)) {
1530                 mode = 3;
1531         } else {
1532                 dev_err(dev->dev, "Unknown transfer type for %s\n",
1533                         ep->ep.name);
1534                 return -EINVAL;
1535         }
1536
1537         /*
1538          * Bits 10-0 set the max payload. 12-11 set the number of
1539          * additional transactions.
1540          */
1541         max = 0x7ff & usb_endpoint_maxp(desc);
1542         nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
1543         buffer_size = GR_BUFFER_SIZE(epctrl);
1544         if (nt && (mode == 0 || mode == 2)) {
1545                 dev_err(dev->dev,
1546                         "%s mode: multiple trans./microframe not valid\n",
1547                         (mode == 2 ? "Bulk" : "Control"));
1548                 return -EINVAL;
1549         } else if (nt == 0x3) {
1550                 dev_err(dev->dev,
1551                         "Invalid value 0x3 for additional trans./microframe\n");
1552                 return -EINVAL;
1553         } else if ((nt + 1) * max > buffer_size) {
1554                 dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
1555                         buffer_size, (nt + 1), max);
1556                 return -EINVAL;
1557         } else if (max == 0) {
1558                 dev_err(dev->dev, "Max payload cannot be set to 0\n");
1559                 return -EINVAL;
1560         } else if (max > ep->ep.maxpacket_limit) {
1561                 dev_err(dev->dev, "Requested max payload %d > limit %d\n",
1562                         max, ep->ep.maxpacket_limit);
1563                 return -EINVAL;
1564         }
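
             /*
              * Worked example (illustrative): a high-bandwidth isochronous
              * descriptor with wMaxPacketSize = 0x1400 decodes above to
              * max = 0x400 (1024 bytes) and nt = 2, i.e. three transactions
              * per microframe, so the checks just made demand a hardware
              * buffer of at least (nt + 1) * max = 3 * 1024 = 3072 bytes.
              */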
1565
1566         spin_lock(&ep->dev->lock);
1567
1568         if (!ep->stopped) {
1569                 spin_unlock(&ep->dev->lock);
1570                 return -EBUSY;
1571         }
1572
1573         ep->stopped = 0;
1574         ep->wedged = 0;
1575         ep->ep.desc = desc;
1576         ep->ep.maxpacket = max;
1577         ep->dma_start = 0;
1578
1580         if (nt) {
1581                 /*
1582                  * Maximum possible size of all payloads in one microframe
1583                  * regardless of direction when using high-bandwidth mode.
1584                  */
1585                 ep->bytes_per_buffer = (nt + 1) * max;
1586         } else if (ep->is_in) {
1587                 /*
1588                  * The biggest multiple of maximum packet size that fits into
1589                  * the buffer. The hardware will split up into many packets in
1590                  * the IN direction.
1591                  */
1592                 ep->bytes_per_buffer = (buffer_size / max) * max;
1593         } else {
1594                 /*
1595                  * Only single packets will be placed in the buffers in
1596                  * the OUT direction.
1597                  */
1598                 ep->bytes_per_buffer = max;
1599         }
1600
1601         epctrl = (max << GR_EPCTRL_MAXPL_POS)
1602                 | (nt << GR_EPCTRL_NT_POS)
1603                 | (mode << GR_EPCTRL_TT_POS)
1604                 | GR_EPCTRL_EV;
1605         if (ep->is_in)
1606                 epctrl |= GR_EPCTRL_PI;
1607         gr_write32(&ep->regs->epctrl, epctrl);
1608
1609         gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
1610
1611         spin_unlock(&ep->dev->lock);
1612
1613         dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
1614                 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
1615         return 0;
1616 }
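
     /*
      * Illustrative caller (hypothetical gadget-function code, not part of
      * this driver): the gadget core reaches gr_ep_enable() through the
      * usb_ep API once a descriptor has been chosen for the current speed:
      *
      *        struct usb_ep *ep;
      *        int ret;
      *
      *        ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
      *        if (!ep)
      *                return -ENODEV;
      *        ep->desc = &fs_bulk_in_desc;
      *        ret = usb_ep_enable(ep);      ends up in gr_ep_enable(ep, ep->desc)
      */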
1617
1618 /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
1619 static int gr_ep_disable(struct usb_ep *_ep)
1620 {
1621         struct gr_ep *ep;
1622         struct gr_udc *dev;
1623         unsigned long flags;
1624
1625         ep = container_of(_ep, struct gr_ep, ep);
1626         if (!_ep || !ep->ep.desc)
1627                 return -ENODEV;
1628
1629         dev = ep->dev;
1630
1631         /* 'ep0' IN and OUT are reserved */
1632         if (ep == &dev->epo[0] || ep == &dev->epi[0])
1633                 return -EINVAL;
1634
1635         if (dev->ep0state == GR_EP0_SUSPEND)
1636                 return -EBUSY;
1637
1638         dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
1639
1640         spin_lock_irqsave(&dev->lock, flags);
1641
1642         gr_ep_nuke(ep);
1643         gr_ep_reset(ep);
1644         ep->ep.desc = NULL;
1645
1646         spin_unlock_irqrestore(&dev->lock, flags);
1647
1648         return 0;
1649 }
1650
1651 /*
1652  * Frees a request, but not any DMA buffers associated with it
1653  * (gr_finish_request should already have taken care of that).
1654  */
1655 static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
1656 {
1657         struct gr_request *req;
1658
1659         if (!_ep || !_req)
1660                 return;
1661         req = container_of(_req, struct gr_request, req);
1662
1663         /* Freeing a request that is still queued leads to a memory leak */
1664         WARN(!list_empty(&req->queue),
1665              "request not dequeued properly before freeing\n");
1666
1667         kfree(req);
1668 }
1669
1670 /* Queue a request from the gadget */
1671 static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
1672                         gfp_t gfp_flags)
1673 {
1674         struct gr_ep *ep;
1675         struct gr_request *req;
1676         struct gr_udc *dev;
1677         int ret;
1678
1679         if (unlikely(!_ep || !_req))
1680                 return -EINVAL;
1681
1682         ep = container_of(_ep, struct gr_ep, ep);
1683         req = container_of(_req, struct gr_request, req);
1684         dev = ep->dev;
1685
1686         spin_lock(&ep->dev->lock);
1687
1688         /*
1689          * The ep0 pointer in the gadget struct is used both for ep0in and
1690          * ep0out. In a data stage in the out direction ep0out needs to be used
1691          * instead of the default ep0in. Completion functions might use
1692          * driver_data, so that needs to be copied as well.
1693          */
1694         if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
1695                 ep = &dev->epo[0];
1696                 ep->ep.driver_data = dev->epi[0].ep.driver_data;
1697         }
1698
1699         if (ep->is_in)
1700                 gr_dbgprint_request("EXTERN", ep, req);
1701
1702         ret = gr_queue(ep, req, GFP_ATOMIC);
1703
1704         spin_unlock(&ep->dev->lock);
1705
1706         return ret;
1707 }
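
     /*
      * Illustrative submission path (hypothetical names): a transfer queued
      * by a gadget driver arrives in gr_queue_ext() via the usb_ep API:
      *
      *        req = usb_ep_alloc_request(ep, GFP_ATOMIC);     gr_alloc_request()
      *        req->buf = buf;
      *        req->length = len;
      *        req->complete = my_complete;
      *        ret = usb_ep_queue(ep, req, GFP_ATOMIC);        gr_queue_ext()
      *
      * The mapping from usb_ep calls to gr_* functions is set up in the
      * gr_ep_ops table below.
      */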
1708
1709 /* Dequeue JUST ONE request */
1710 static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1711 {
1712         struct gr_request *req;
1713         struct gr_ep *ep;
1714         struct gr_udc *dev;
1715         int ret = 0;
1716         unsigned long flags;
1717
1718         ep = container_of(_ep, struct gr_ep, ep);
1719         if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
1720                 return -EINVAL;
1721         dev = ep->dev;
1722         if (!dev->driver)
1723                 return -ESHUTDOWN;
1724
1725         /* We can't touch (DMA) registers when suspended */
1726         if (dev->ep0state == GR_EP0_SUSPEND)
1727                 return -EBUSY;
1728
1729         spin_lock_irqsave(&dev->lock, flags);
1730
1731         /* Make sure it's actually queued on this endpoint */
1732         list_for_each_entry(req, &ep->queue, queue) {
1733                 if (&req->req == _req)
1734                         break;
1735         }
1736         if (&req->req != _req) {
1737                 ret = -EINVAL;
1738                 goto out;
1739         }
1740
1741         if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
1742                 /* This request is currently being processed */
1743                 gr_abort_dma(ep);
1744                 if (ep->stopped)
1745                         gr_finish_request(ep, req, -ECONNRESET);
1746                 else
1747                         gr_dma_advance(ep, -ECONNRESET);
1748         } else if (!list_empty(&req->queue)) {
1749                 /* Not being processed - gr_finish_request dequeues it */
1750                 gr_finish_request(ep, req, -ECONNRESET);
1751         } else {
1752                 ret = -EOPNOTSUPP;
1753         }
1754
1755 out:
1756         spin_unlock_irqrestore(&dev->lock, flags);
1757
1758         return ret;
1759 }
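
     /*
      * Illustrative cancellation (hypothetical caller): a function driver
      * aborts one outstanding transfer with
      *
      *        usb_ep_dequeue(ep, req);
      *
      * which lands here; the victim request then completes with status
      * -ECONNRESET as seen above.
      */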
1760
1761 /* Helper for gr_set_halt and gr_set_wedge */
1762 static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
1763 {
1764         int ret;
1765         struct gr_ep *ep;
1766
1767         if (!_ep)
1768                 return -ENODEV;
1769         ep = container_of(_ep, struct gr_ep, ep);
1770
1771         spin_lock(&ep->dev->lock);
1772
1773         /* Halting an IN endpoint should fail if queue is not empty */
1774         if (halt && ep->is_in && !list_empty(&ep->queue)) {
1775                 ret = -EAGAIN;
1776                 goto out;
1777         }
1778
1779         ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
1780
1781 out:
1782         spin_unlock(&ep->dev->lock);
1783
1784         return ret;
1785 }
1786
1787 /* Halt endpoint */
1788 static int gr_set_halt(struct usb_ep *_ep, int halt)
1789 {
1790         return gr_set_halt_wedge(_ep, halt, 0);
1791 }
1792
1793 /* Halt and wedge endpoint */
1794 static int gr_set_wedge(struct usb_ep *_ep)
1795 {
1796         return gr_set_halt_wedge(_ep, 1, 1);
1797 }
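
     /*
      * Halt versus wedge, illustratively (hypothetical caller): a plain
      * halt is cleared again when the host sends
      * CLEAR_FEATURE(ENDPOINT_HALT), whereas a wedged endpoint stays
      * stalled until the gadget side clears it, mass-storage style:
      *
      *        usb_ep_set_wedge(bulk_in);      stall until the function re-arms
      *        ...
      *        usb_ep_clear_halt(bulk_in);
      */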
1798
1799 /*
1800  * Return the total number of bytes currently stored in the internal buffers of
1801  * the endpoint.
1802  */
1803 static int gr_fifo_status(struct usb_ep *_ep)
1804 {
1805         struct gr_ep *ep;
1806         u32 epstat;
1807         u32 bytes = 0;
1808
1809         if (!_ep)
1810                 return -ENODEV;
1811         ep = container_of(_ep, struct gr_ep, ep);
1812
1813         epstat = gr_read32(&ep->regs->epstat);
1814
1815         if (epstat & GR_EPSTAT_B0)
1816                 bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
1817         if (epstat & GR_EPSTAT_B1)
1818                 bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
1819
1820         return bytes;
1821 }
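
     /*
      * Worked example (illustrative): the core double-buffers each
      * endpoint, so with both buffer-valid bits set and a 512-byte packet
      * in each buffer, B0CNT = B1CNT = 512 and gr_fifo_status() reports
      * 1024 bytes.
      */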
1822
1824 /* Empty data from internal buffers of an endpoint. */
1825 static void gr_fifo_flush(struct usb_ep *_ep)
1826 {
1827         struct gr_ep *ep;
1828         u32 epctrl;
1829
1830         if (!_ep)
1831                 return;
1832         ep = container_of(_ep, struct gr_ep, ep);
1833         dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
1834
1835         spin_lock(&ep->dev->lock);
1836
1837         epctrl = gr_read32(&ep->regs->epctrl);
1838         epctrl |= GR_EPCTRL_CB;
1839         gr_write32(&ep->regs->epctrl, epctrl);
1840
1841         spin_unlock(&ep->dev->lock);
1842 }
1843
1844 static const struct usb_ep_ops gr_ep_ops = {
1845         .enable         = gr_ep_enable,
1846         .disable        = gr_ep_disable,
1847
1848         .alloc_request  = gr_alloc_request,
1849         .free_request   = gr_free_request,
1850
1851         .queue          = gr_queue_ext,
1852         .dequeue        = gr_dequeue,
1853
1854         .set_halt       = gr_set_halt,
1855         .set_wedge      = gr_set_wedge,
1856         .fifo_status    = gr_fifo_status,
1857         .fifo_flush     = gr_fifo_flush,
1858 };
1859
1860 /* ---------------------------------------------------------------------- */
1861 /* USB Gadget ops */
1862
1863 static int gr_get_frame(struct usb_gadget *_gadget)
1864 {
1865         struct gr_udc *dev;
1866
1867         if (!_gadget)
1868                 return -ENODEV;
1869         dev = container_of(_gadget, struct gr_udc, gadget);
1870         return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
1871 }
1872
1873 static int gr_wakeup(struct usb_gadget *_gadget)
1874 {
1875         struct gr_udc *dev;
1876
1877         if (!_gadget)
1878                 return -ENODEV;
1879         dev = container_of(_gadget, struct gr_udc, gadget);
1880
1881         /* Remote wakeup feature not enabled by host */
1882         if (!dev->remote_wakeup)
1883                 return -EINVAL;
1884
1885         spin_lock(&dev->lock);
1886
1887         gr_write32(&dev->regs->control,
1888                    gr_read32(&dev->regs->control) | GR_CONTROL_RW);
1889
1890         spin_unlock(&dev->lock);
1891
1892         return 0;
1893 }
1894
1895 static int gr_pullup(struct usb_gadget *_gadget, int is_on)
1896 {
1897         struct gr_udc *dev;
1898         u32 control;
1899
1900         if (!_gadget)
1901                 return -ENODEV;
1902         dev = container_of(_gadget, struct gr_udc, gadget);
1903
1904         spin_lock(&dev->lock);
1905
1906         control = gr_read32(&dev->regs->control);
1907         if (is_on)
1908                 control |= GR_CONTROL_EP;
1909         else
1910                 control &= ~GR_CONTROL_EP;
1911         gr_write32(&dev->regs->control, control);
1912
1913         spin_unlock(&dev->lock);
1914
1915         return 0;
1916 }
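
     /*
      * Note (illustrative): the gadget core invokes gr_pullup() through
      * usb_gadget_connect() and usb_gadget_disconnect(), which pass
      * is_on = 1 and 0 respectively, so GR_CONTROL_EP is effectively the
      * software switch for the D+ pullup:
      *
      *        usb_gadget_connect(&dev->gadget);      ends up in gr_pullup(g, 1)
      */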
1917
1918 static int gr_udc_start(struct usb_gadget *gadget,
1919                         struct usb_gadget_driver *driver)
1920 {
1921         struct gr_udc *dev = to_gr_udc(gadget);
1922
1923         spin_lock(&dev->lock);
1924
1925         /* Hook up the driver */
1926         driver->driver.bus = NULL;
1927         dev->driver = driver;
1928
1929         /* Get ready for host detection */
1930         gr_enable_vbus_detect(dev);
1931
1932         spin_unlock(&dev->lock);
1933
1934         return 0;
1935 }
1936
1937 static int gr_udc_stop(struct usb_gadget *gadget)
1938 {
1939         struct gr_udc *dev = to_gr_udc(gadget);
1940         unsigned long flags;
1941
1942         spin_lock_irqsave(&dev->lock, flags);
1943
1944         dev->driver = NULL;
1945         gr_stop_activity(dev);
1946
1947         spin_unlock_irqrestore(&dev->lock, flags);
1948
1949         return 0;
1950 }
1951
1952 static const struct usb_gadget_ops gr_ops = {
1953         .get_frame      = gr_get_frame,
1954         .wakeup         = gr_wakeup,
1955         .pullup         = gr_pullup,
1956         .udc_start      = gr_udc_start,
1957         .udc_stop       = gr_udc_stop,
1958         /* Other operations not supported */
1959 };
1960
1961 /* ---------------------------------------------------------------------- */
1962 /* Module probe, removal and of-matching */
1963
1964 static const char * const onames[] = {
1965         "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
1966         "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
1967         "ep12out", "ep13out", "ep14out", "ep15out"
1968 };
1969
1970 static const char * const inames[] = {
1971         "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
1972         "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
1973         "ep12in", "ep13in", "ep14in", "ep15in"
1974 };
1975
1976 /* Must be called with dev->lock held */
1977 static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
1978 {
1979         struct gr_ep *ep;
1980         struct gr_request *req;
1981         struct usb_request *_req;
1982         void *buf;
1983
1984         if (is_in) {
1985                 ep = &dev->epi[num];
1986                 ep->ep.name = inames[num];
1987                 ep->regs = &dev->regs->epi[num];
1988         } else {
1989                 ep = &dev->epo[num];
1990                 ep->ep.name = onames[num];
1991                 ep->regs = &dev->regs->epo[num];
1992         }
1993
1994         gr_ep_reset(ep);
1995         ep->num = num;
1996         ep->is_in = is_in;
1997         ep->dev = dev;
1998         ep->ep.ops = &gr_ep_ops;
1999         INIT_LIST_HEAD(&ep->queue);
2000
2001         if (num == 0) {
2002                 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
2003                 if (!_req)
2004                         return -ENOMEM;
2005
2006                 buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
2007                 if (!buf) {
2008                         gr_free_request(&ep->ep, _req);
2009                         return -ENOMEM;
2010                 }
2011
2012                 req = container_of(_req, struct gr_request, req);
2013                 req->req.buf = buf;
2014                 req->req.length = MAX_CTRL_PL_SIZE;
2015
2016                 if (is_in)
2017                         dev->ep0reqi = req; /* Completion callback set when used */
2018                 else
2019                         dev->ep0reqo = req; /* Completion treated separately */
2020
2021                 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
2022                 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
2023
2024                 ep->ep.caps.type_control = true;
2025         } else {
2026                 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
2027                 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2028
2029                 ep->ep.caps.type_iso = true;
2030                 ep->ep.caps.type_bulk = true;
2031                 ep->ep.caps.type_int = true;
2032         }
2033         list_add_tail(&ep->ep_list, &dev->ep_list);
2034
2035         if (is_in)
2036                 ep->ep.caps.dir_in = true;
2037         else
2038                 ep->ep.caps.dir_out = true;
2039
2040         ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
2041                                          &ep->tailbuf_paddr, GFP_ATOMIC);
2042         if (!ep->tailbuf)
2043                 return -ENOMEM;
2044
2045         return 0;
2046 }
2047
2048 /* Must be called with dev->lock held */
2049 static int gr_udc_init(struct gr_udc *dev)
2050 {
2051         struct device_node *np = dev->dev->of_node;
2052         u32 epctrl_val;
2053         u32 dmactrl_val;
2054         int i;
2055         int ret = 0;
2056         u32 bufsize;
2057
2058         gr_set_address(dev, 0);
2059
2060         INIT_LIST_HEAD(&dev->gadget.ep_list);
2061         dev->gadget.speed = USB_SPEED_UNKNOWN;
2062         dev->gadget.ep0 = &dev->epi[0].ep;
2063
2064         INIT_LIST_HEAD(&dev->ep_list);
2065         gr_set_ep0state(dev, GR_EP0_DISCONNECT);
2066
2067         for (i = 0; i < dev->nepo; i++) {
2068                 if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
2069                         bufsize = 1024;
2070                 ret = gr_ep_init(dev, i, 0, bufsize);
2071                 if (ret)
2072                         return ret;
2073         }
2074
2075         for (i = 0; i < dev->nepi; i++) {
2076                 if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
2077                         bufsize = 1024;
2078                 ret = gr_ep_init(dev, i, 1, bufsize);
2079                 if (ret)
2080                         return ret;
2081         }
2082
2083         /* Must be disabled by default */
2084         dev->remote_wakeup = 0;
2085
2086         /* Enable ep0out and ep0in */
2087         epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
2088         dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
2089         gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
2090         gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
2091         gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
2092         gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
2093
2094         return 0;
2095 }
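
     /*
      * Illustrative device tree fragment for the lookups above (names and
      * numbers made up; see Documentation/devicetree/bindings/usb/gr-udc.txt
      * for the binding). Endpoints without an epobufsizes/epibufsizes entry
      * fall back to the 1024 byte default used in gr_udc_init():
      *
      *        GAISLER_USBDC@80000400 {
      *                reg = <0x80000400 0x400>;
      *                interrupts = <14>;
      *                epobufsizes = <1024 1024 512>;
      *                epibufsizes = <1024 512>;
      *        };
      */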
2096
2097 static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
2098 {
2099         struct gr_ep *ep;
2100
2101         if (is_in)
2102                 ep = &dev->epi[num];
2103         else
2104                 ep = &dev->epo[num];
2105
2106         if (ep->tailbuf)
2107                 dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
2108                                   ep->tailbuf, ep->tailbuf_paddr);
2109 }
2110
2111 static int gr_remove(struct platform_device *pdev)
2112 {
2113         struct gr_udc *dev = platform_get_drvdata(pdev);
2114         int i;
2115
2116         if (dev->added)
2117                 usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
2118         if (dev->driver)
2119                 return -EBUSY;
2120
2121         gr_dfs_delete(dev);
2122         dma_pool_destroy(dev->desc_pool);
2123         platform_set_drvdata(pdev, NULL);
2124
2125         gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
2126         gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
2127
2128         for (i = 0; i < dev->nepo; i++)
2129                 gr_ep_remove(dev, i, 0);
2130         for (i = 0; i < dev->nepi; i++)
2131                 gr_ep_remove(dev, i, 1);
2132
2133         return 0;
2134 }

2135 static int gr_request_irq(struct gr_udc *dev, int irq)
2136 {
2137         return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
2138                                          IRQF_SHARED, driver_name, dev);
2139 }
2140
2141 static int gr_probe(struct platform_device *pdev)
2142 {
2143         struct gr_udc *dev;
2144         struct resource *res;
2145         struct gr_regs __iomem *regs;
2146         int retval;
2147         u32 status;
2148
2149         dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2150         if (!dev)
2151                 return -ENOMEM;
2152         dev->dev = &pdev->dev;
2153
2154         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2155         regs = devm_ioremap_resource(dev->dev, res);
2156         if (IS_ERR(regs))
2157                 return PTR_ERR(regs);
2158
2159         dev->irq = platform_get_irq(pdev, 0);
2160         if (dev->irq <= 0) {
2161                 dev_err(dev->dev, "No irq found\n");
2162                 return -ENODEV;
2163         }
2164
2165         /* Some core configurations have separate irqs for IN and OUT events */
2166         dev->irqi = platform_get_irq(pdev, 1);
2167         if (dev->irqi > 0) {
2168                 dev->irqo = platform_get_irq(pdev, 2);
2169                 if (dev->irqo <= 0) {
2170                         dev_err(dev->dev, "Found irqi but not irqo\n");
2171                         return -ENODEV;
2172                 }
2173         } else {
2174                 dev->irqi = 0;
2175         }
2176
2177         dev->gadget.name = driver_name;
2178         dev->gadget.max_speed = USB_SPEED_HIGH;
2179         dev->gadget.ops = &gr_ops;
2180
2181         spin_lock_init(&dev->lock);
2182         dev->regs = regs;
2183
2184         platform_set_drvdata(pdev, dev);
2185
2186         /* Determine number of endpoints and data interface mode */
2187         status = gr_read32(&dev->regs->status);
2188         dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
2189         dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
2190
2191         if (!(status & GR_STATUS_DM)) {
2192                 dev_err(dev->dev, "Slave mode cores are not supported\n");
2193                 return -ENODEV;
2194         }
2195
2196         /* --- Effects of the following calls might need explicit cleanup --- */
2197
2198         /* Create DMA pool for descriptors */
2199         dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
2200                                          sizeof(struct gr_dma_desc), 4, 0);
2201         if (!dev->desc_pool) {
2202                 dev_err(dev->dev, "Could not allocate DMA pool\n");
2203                 return -ENOMEM;
2204         }
2205
2206         /* A gadget may bind from here on; hw init below runs under the lock */
2207         retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
2208         if (retval) {
2209                 dev_err(dev->dev, "Could not add gadget udc\n");
2210                 goto out;
2211         }
2212         dev->added = 1;
2213
2214         spin_lock(&dev->lock);
2215
2216         retval = gr_udc_init(dev);
2217         if (retval) {
2218                 spin_unlock(&dev->lock);
2219                 goto out;
2220         }
2221
2222         /* Clear all interrupt enables that might be left on since last boot */
2223         gr_disable_interrupts_and_pullup(dev);
2224
2225         spin_unlock(&dev->lock);
2226
2227         gr_dfs_create(dev);
2228
2229         retval = gr_request_irq(dev, dev->irq);
2230         if (retval) {
2231                 dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
2232                 goto out;
2233         }
2234
2235         if (dev->irqi) {
2236                 retval = gr_request_irq(dev, dev->irqi);
2237                 if (retval) {
2238                         dev_err(dev->dev, "Failed to request irqi %d\n",
2239                                 dev->irqi);
2240                         goto out;
2241                 }
2242                 retval = gr_request_irq(dev, dev->irqo);
2243                 if (retval) {
2244                         dev_err(dev->dev, "Failed to request irqo %d\n",
2245                                 dev->irqo);
2246                         goto out;
2247                 }
2248         }
2249
2250         if (dev->irqi)
2251                 dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
2252                          dev->irq, dev->irqi, dev->irqo);
2253         else
2254                 dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
2255
2256 out:
2257         if (retval)
2258                 gr_remove(pdev);
2259
2260         return retval;
2261 }
2262
2263 static const struct of_device_id gr_match[] = {
2264         {.name = "GAISLER_USBDC"},
2265         {.name = "01_021"},
2266         {},
2267 };
2268 MODULE_DEVICE_TABLE(of, gr_match);
2269
2270 static struct platform_driver gr_driver = {
2271         .driver = {
2272                 .name = DRIVER_NAME,
2273                 .of_match_table = gr_match,
2274         },
2275         .probe = gr_probe,
2276         .remove = gr_remove,
2277 };
2278 module_platform_driver(gr_driver);
2279
2280 MODULE_AUTHOR("Aeroflex Gaisler AB.");
2281 MODULE_DESCRIPTION(DRIVER_DESC);
2282 MODULE_LICENSE("GPL");