// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 *
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/list.h>
30 #include <linux/interrupt.h>
31 #include <linux/device.h>
32 #include <linux/usb/ch9.h>
33 #include <linux/usb/gadget.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/dmapool.h>
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #include <linux/of_platform.h>
39 #include <linux/of_irq.h>
40 #include <linux/of_address.h>
42 #include <asm/byteorder.h>
46 #define DRIVER_NAME "gr_udc"
47 #define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
49 static const char driver_name[] = DRIVER_NAME;
50 static const char driver_desc[] = DRIVER_DESC;
52 #define gr_read32(x) (ioread32be((x)))
53 #define gr_write32(x, v) (iowrite32be((v), (x)))
55 /* USB speed and corresponding string calculated from status register value */
56 #define GR_SPEED(status) \
57 ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
58 #define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))
60 /* Size of hardware buffer calculated from epctrl register value */
61 #define GR_BUFFER_SIZE(epctrl) \
62 ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
63 GR_EPCTRL_BUFSZ_SCALER)
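/*
 * Worked example (field values assumed for illustration only): if the BUFSZ
 * field of epctrl holds 4 and GR_EPCTRL_BUFSZ_SCALER is 8, GR_BUFFER_SIZE()
 * yields 4 * 8 = 32 bytes of hardware buffer.
 */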
65 /* ---------------------------------------------------------------------- */
66 /* Debug printout functionality */
68 static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
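/* Translates an ep0 state into a human-readable string for debug printouts */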
70 static const char *gr_ep0state_string(enum gr_ep0state state)
72 static const char *const names[] = {
73 [GR_EP0_DISCONNECT] = "disconnect",
74 [GR_EP0_SETUP] = "setup",
75 [GR_EP0_IDATA] = "idata",
76 [GR_EP0_ODATA] = "odata",
77 [GR_EP0_ISTATUS] = "istatus",
78 [GR_EP0_OSTATUS] = "ostatus",
79 [GR_EP0_STALL] = "stall",
80 [GR_EP0_SUSPEND] = "suspend",
83 if (state < 0 || state >= ARRAY_SIZE(names))
91 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
92 struct gr_request *req)
94 int buflen = ep->is_in ? req->req.length : req->req.actual;
96 int plen = min(rowlen, buflen);
98 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
99 (buflen > plen ? " (truncated)" : ""));
100 print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
101 rowlen, 4, req->req.buf, plen, false);
104 static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
105 u16 value, u16 index, u16 length)
107 dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
108 type, request, value, index, length);
110 #else /* !VERBOSE_DEBUG */
112 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
113 struct gr_request *req) {}
115 static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
116 u16 value, u16 index, u16 length) {}
118 #endif /* VERBOSE_DEBUG */
120 /* ---------------------------------------------------------------------- */
121 /* Debugfs functionality */
123 #ifdef CONFIG_USB_GADGET_DEBUG_FS
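/* Prints the state of one endpoint (mode, flags, buffers and queue) to debugfs */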
125 static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
127 u32 epctrl = gr_read32(&ep->regs->epctrl);
128 u32 epstat = gr_read32(&ep->regs->epstat);
129 int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
130 struct gr_request *req;
132 seq_printf(seq, "%s:\n", ep->ep.name);
133 seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
134 seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
135 seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
136 seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
137 seq_printf(seq, " dma_start = %d\n", ep->dma_start);
138 seq_printf(seq, " stopped = %d\n", ep->stopped);
139 seq_printf(seq, " wedged = %d\n", ep->wedged);
140 seq_printf(seq, " callback = %d\n", ep->callback);
141 seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
142 seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
143 seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
144 if (mode == 1 || mode == 3)
145 seq_printf(seq, " nt = %d\n",
146 (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
148 seq_printf(seq, " Buffer 0: %s %s%d\n",
149 epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
150 epstat & GR_EPSTAT_BS ? " " : "selected ",
151 (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
152 seq_printf(seq, " Buffer 1: %s %s%d\n",
153 epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
154 epstat & GR_EPSTAT_BS ? "selected " : " ",
155 (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
157 if (list_empty(&ep->queue)) {
158 seq_puts(seq, " Queue: empty\n\n");
162 seq_puts(seq, " Queue:\n");
163 list_for_each_entry(req, &ep->queue, queue) {
164 struct gr_dma_desc *desc;
165 struct gr_dma_desc *next;
167 seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
168 &req->req.buf, req->req.actual, req->req.length);
170 next = req->first_desc;
173 next = desc->next_desc;
174 seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
175 desc == req->curr_desc ? 'c' : ' ',
176 desc, desc->paddr, desc->ctrl, desc->data);
177 } while (desc != req->last_desc);
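/* debugfs "show" callback: prints overall controller state and then every endpoint */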
182 static int gr_dfs_show(struct seq_file *seq, void *v)
184 struct gr_udc *dev = seq->private;
185 u32 control = gr_read32(&dev->regs->control);
186 u32 status = gr_read32(&dev->regs->status);
189 seq_printf(seq, "usb state = %s\n",
190 usb_state_string(dev->gadget.state));
191 seq_printf(seq, "address = %d\n",
192 (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
193 seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
194 seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
195 seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
196 seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
197 seq_printf(seq, "test_mode = %d\n", dev->test_mode);
200 list_for_each_entry(ep, &dev->ep_list, ep_list)
201 gr_seq_ep_show(seq, ep);
205 DEFINE_SHOW_ATTRIBUTE(gr_dfs);
207 static void gr_dfs_create(struct gr_udc *dev)
209 const char *name = "gr_udc_state";
211 dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
212 debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
215 static void gr_dfs_delete(struct gr_udc *dev)
217 debugfs_remove_recursive(dev->dfs_root);
220 #else /* !CONFIG_USB_GADGET_DEBUG_FS */
222 static void gr_dfs_create(struct gr_udc *dev) {}
223 static void gr_dfs_delete(struct gr_udc *dev) {}
225 #endif /* CONFIG_USB_GADGET_DEBUG_FS */
227 /* ---------------------------------------------------------------------- */
228 /* DMA and request handling */
230 /* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
231 static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
234 struct gr_dma_desc *dma_desc;
236 dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
238 dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
242 dma_desc->paddr = paddr;
247 static inline void gr_free_dma_desc(struct gr_udc *dev,
248 struct gr_dma_desc *desc)
250 dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
253 /* Frees the chain of struct gr_dma_desc for the given request */
254 static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
256 struct gr_dma_desc *desc;
257 struct gr_dma_desc *next;
259 next = req->first_desc;
265 next = desc->next_desc;
266 gr_free_dma_desc(dev, desc);
267 } while (desc != req->last_desc);
269 req->first_desc = NULL;
270 req->curr_desc = NULL;
271 req->last_desc = NULL;
274 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
282 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
284 __releases(&dev->lock)
285 __acquires(&dev->lock)
289 list_del_init(&req->queue);
291 if (likely(req->req.status == -EINPROGRESS))
292 req->req.status = status;
294 status = req->req.status;
297 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
298 gr_free_dma_desc_chain(dev, req);
300 if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
301 req->req.actual = req->req.length;
302 } else if (req->oddlen && req->req.actual > req->evenlen) {
/*
 * Copy to user buffer in this case where length was not evenly
 * divisible by ep->ep.maxpacket and the last descriptor was
 * directed to the internal bounce buffer.
 */
308 char *buftail = ((char *)req->req.buf + req->evenlen);
310 memcpy(buftail, ep->tailbuf, req->oddlen);
312 if (req->req.actual > req->req.length) {
313 /* We got more data than was requested */
314 dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
316 gr_dbgprint_request("OVFL", ep, req);
317 req->req.status = -EOVERFLOW;
323 gr_dbgprint_request("SENT", ep, req);
325 gr_dbgprint_request("RECV", ep, req);
328 /* Prevent changes to ep->queue during callback */
330 if (req == dev->ep0reqo && !status) {
332 gr_ep0_setup(dev, req);
335 "Unexpected non setup packet on ep0in\n");
336 } else if (req->req.complete) {
337 spin_unlock(&dev->lock);
339 usb_gadget_giveback_request(&ep->ep, &req->req);
341 spin_lock(&dev->lock);
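/* usb_ep_ops callback: allocate and initialize a request object for this endpoint */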
346 static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
348 struct gr_request *req;
350 req = kzalloc(sizeof(*req), gfp_flags);
354 INIT_LIST_HEAD(&req->queue);
360 * Starts DMA for endpoint ep if there are requests in the queue.
362 * Must be called with dev->lock held and with !ep->stopped.
364 static void gr_start_dma(struct gr_ep *ep)
366 struct gr_request *req;
369 if (list_empty(&ep->queue)) {
374 req = list_first_entry(&ep->queue, struct gr_request, queue);
376 /* A descriptor should already have been allocated */
377 BUG_ON(!req->curr_desc);
/*
 * The DMA controller cannot handle smaller OUT buffers than
 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
 * long packet is received. Therefore an internal bounce buffer gets
 * used when such a request gets enabled.
 */
385 if (!ep->is_in && req->oddlen)
386 req->last_desc->data = ep->tailbuf_paddr;
388 wmb(); /* Make sure all is settled before handing it over to DMA */
390 /* Set the descriptor pointer in the hardware */
391 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
393 /* Announce available descriptors */
394 dmactrl = gr_read32(&ep->regs->dmactrl);
395 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
401 * Finishes the first request in the ep's queue and, if available, starts the
402 * next request in queue.
404 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
406 static void gr_dma_advance(struct gr_ep *ep, int status)
408 struct gr_request *req;
410 req = list_first_entry(&ep->queue, struct gr_request, queue);
411 gr_finish_request(ep, req, status);
412 gr_start_dma(ep); /* Regardless of ep->dma_start */
416 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
417 * transfer to be canceled and clears GR_DMACTRL_DA.
419 * Must be called with dev->lock held.
421 static void gr_abort_dma(struct gr_ep *ep)
425 dmactrl = gr_read32(&ep->regs->dmactrl);
426 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain of the request.
 *
 * Size is not used for OUT endpoints. Hardware cannot be instructed to handle
 * smaller buffers than MAXPL in the OUT direction.
 */
436 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
437 dma_addr_t data, unsigned size, gfp_t gfp_flags)
439 struct gr_dma_desc *desc;
441 desc = gr_alloc_dma_desc(ep, gfp_flags);
448 (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
450 desc->ctrl = GR_DESC_OUT_CTRL_IE;
452 if (!req->first_desc) {
453 req->first_desc = desc;
454 req->curr_desc = desc;
456 req->last_desc->next_desc = desc;
457 req->last_desc->next = desc->paddr;
458 req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
460 req->last_desc = desc;
/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
475 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
478 u16 bytes_left; /* Bytes left to provide descriptors for */
479 u16 bytes_used; /* Bytes accommodated for */
482 req->first_desc = NULL; /* Signals that no allocation is done yet */
483 bytes_left = req->req.length;
485 while (bytes_left > 0) {
486 dma_addr_t start = req->req.dma + bytes_used;
487 u16 size = min(bytes_left, ep->bytes_per_buffer);
489 if (size < ep->bytes_per_buffer) {
490 /* Prepare using bounce buffer */
491 req->evenlen = req->req.length - bytes_left;
495 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
503 req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
508 gr_free_dma_desc_chain(ep->dev, req);
/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
528 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
531 u16 bytes_left; /* Bytes left in req to provide descriptors for */
532 u16 bytes_used; /* Bytes in req accommodated for */
535 req->first_desc = NULL; /* Signals that no allocation is done yet */
536 bytes_left = req->req.length;
538 do { /* Allow for zero length packets */
539 dma_addr_t start = req->req.dma + bytes_used;
540 u16 size = min(bytes_left, ep->bytes_per_buffer);
542 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
548 } while (bytes_left > 0);
/*
 * Send an extra zero length packet to indicate that no more data is
 * available when req->req.zero is set and the data length is an even
 * multiple of ep->ep.maxpacket.
 */
555 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
556 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
562 * For IN packets we only want to know when the last packet has been
563 * transmitted (not just put into internal buffers).
565 req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;
570 gr_free_dma_desc_chain(ep->dev, req);
575 /* Must be called with dev->lock held */
576 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
578 struct gr_udc *dev = ep->dev;
581 if (unlikely(!ep->ep.desc && ep->num != 0)) {
582 dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
586 if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
588 "Invalid request for %s: buf=%p list_empty=%d\n",
589 ep->ep.name, req->req.buf, list_empty(&req->queue));
593 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
594 dev_err(dev->dev, "-ESHUTDOWN");
598 /* Can't touch registers when suspended */
599 if (dev->ep0state == GR_EP0_SUSPEND) {
600 dev_err(dev->dev, "-EBUSY");
604 /* Set up DMA mapping in case the caller didn't */
605 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
607 dev_err(dev->dev, "usb_gadget_map_request");
612 ret = gr_setup_in_desc_list(ep, req, gfp_flags);
614 ret = gr_setup_out_desc_list(ep, req, gfp_flags);
618 req->req.status = -EINPROGRESS;
620 list_add_tail(&req->queue, &ep->queue);
622 /* Start DMA if not started, otherwise interrupt handler handles it */
623 if (!ep->dma_start && likely(!ep->stopped))
630 * Queue a request from within the driver.
632 * Must be called with dev->lock held.
634 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
638 gr_dbgprint_request("RESP", ep, req);
640 return gr_queue(ep, req, gfp_flags);
643 /* ---------------------------------------------------------------------- */
644 /* General helper functions */
647 * Dequeue ALL requests.
649 * Must be called with dev->lock held and irqs disabled.
651 static void gr_ep_nuke(struct gr_ep *ep)
653 struct gr_request *req;
659 while (!list_empty(&ep->queue)) {
660 req = list_first_entry(&ep->queue, struct gr_request, queue);
661 gr_finish_request(ep, req, -ESHUTDOWN);
666 * Reset the hardware state of this endpoint.
668 * Must be called with dev->lock held.
670 static void gr_ep_reset(struct gr_ep *ep)
672 gr_write32(&ep->regs->epctrl, 0);
673 gr_write32(&ep->regs->dmactrl, 0);
675 ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
682 * Generate STALL on ep0in/out.
684 * Must be called with dev->lock held.
686 static void gr_control_stall(struct gr_udc *dev)
690 epctrl = gr_read32(&dev->epo[0].regs->epctrl);
691 gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
692 epctrl = gr_read32(&dev->epi[0].regs->epctrl);
693 gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
695 dev->ep0state = GR_EP0_STALL;
699 * Halts, halts and wedges, or clears halt for an endpoint.
701 * Must be called with dev->lock held.
703 static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
708 if (ep->num && !ep->ep.desc)
711 if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
714 /* Never actually halt ep0, and therefore never clear halt for ep0 */
716 if (halt && !fromhost) {
717 /* ep0 halt from gadget - generate protocol stall */
718 gr_control_stall(ep->dev);
719 dev_dbg(ep->dev->dev, "EP: stall ep0\n");
725 dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
726 (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
728 epctrl = gr_read32(&ep->regs->epctrl);
731 gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
736 gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
740 /* Things might have been queued up in the meantime */
748 /* Must be called with dev->lock held */
749 static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
751 if (dev->ep0state != value)
752 dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
753 gr_ep0state_string(value));
754 dev->ep0state = value;
758 * Should only be called when endpoints can not generate interrupts.
760 * Must be called with dev->lock held.
762 static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
764 gr_write32(&dev->regs->control, 0);
765 wmb(); /* Make sure that we do not deny one of our interrupts */
766 dev->irq_enabled = 0;
770 * Stop all device activity and disable data line pullup.
772 * Must be called with dev->lock held and irqs disabled.
774 static void gr_stop_activity(struct gr_udc *dev)
778 list_for_each_entry(ep, &dev->ep_list, ep_list)
781 gr_disable_interrupts_and_pullup(dev);
783 gr_set_ep0state(dev, GR_EP0_DISCONNECT);
784 usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
787 /* ---------------------------------------------------------------------- */
788 /* ep0 setup packet handling */
790 static void gr_ep0_testmode_complete(struct usb_ep *_ep,
791 struct usb_request *_req)
797 ep = container_of(_ep, struct gr_ep, ep);
800 spin_lock(&dev->lock);
802 control = gr_read32(&dev->regs->control);
803 control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
804 gr_write32(&dev->regs->control, control);
806 spin_unlock(&dev->lock);
809 static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
811 /* Nothing needs to be done here */
815 * Queue a response on ep0in.
817 * Must be called with dev->lock held.
819 static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
820 void (*complete)(struct usb_ep *ep,
821 struct usb_request *req))
823 u8 *reqbuf = dev->ep0reqi->req.buf;
827 for (i = 0; i < length; i++)
829 dev->ep0reqi->req.length = length;
830 dev->ep0reqi->req.complete = complete;
832 status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
835 "Could not queue ep0in setup response: %d\n", status);
841 * Queue a 2 byte response on ep0in.
843 * Must be called with dev->lock held.
845 static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
847 __le16 le_response = cpu_to_le16(response);
849 return gr_ep0_respond(dev, (u8 *)&le_response, 2,
850 gr_ep0_dummy_complete);
854 * Queue a ZLP response on ep0in.
856 * Must be called with dev->lock held.
858 static inline int gr_ep0_respond_empty(struct gr_udc *dev)
860 return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
864 * This is run when a SET_ADDRESS request is received. First writes
865 * the new address to the control register which is updated internally
866 * when the next IN packet is ACKED.
868 * Must be called with dev->lock held.
870 static void gr_set_address(struct gr_udc *dev, u8 address)
874 control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
875 control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
876 control |= GR_CONTROL_SU;
877 gr_write32(&dev->regs->control, control);
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation to the gadget driver.
 *
 * Must be called with dev->lock held.
 */
886 static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
887 u16 value, u16 index)
893 case USB_REQ_SET_ADDRESS:
894 dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
895 gr_set_address(dev, value & 0xff);
897 usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
899 usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
900 return gr_ep0_respond_empty(dev);
902 case USB_REQ_GET_STATUS:
903 /* Self powered | remote wakeup */
904 response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
905 return gr_ep0_respond_u16(dev, response);
907 case USB_REQ_SET_FEATURE:
909 case USB_DEVICE_REMOTE_WAKEUP:
910 /* Allow remote wakeup */
911 dev->remote_wakeup = 1;
912 return gr_ep0_respond_empty(dev);
914 case USB_DEVICE_TEST_MODE:
915 /* The hardware does not support TEST_FORCE_EN */
917 if (test >= TEST_J && test <= TEST_PACKET) {
918 dev->test_mode = test;
919 return gr_ep0_respond(dev, NULL, 0,
920 gr_ep0_testmode_complete);
925 case USB_REQ_CLEAR_FEATURE:
927 case USB_DEVICE_REMOTE_WAKEUP:
928 /* Disallow remote wakeup */
929 dev->remote_wakeup = 0;
930 return gr_ep0_respond_empty(dev);
935 return 1; /* Delegate the rest */
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation to the gadget driver.
 *
 * Must be called with dev->lock held.
 */
944 static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
945 u16 value, u16 index)
947 if (dev->gadget.state != USB_STATE_CONFIGURED)
951 * Should return STALL for invalid interfaces, but udc driver does not
952 * know anything about that. However, many gadget drivers do not handle
953 * GET_STATUS so we need to take care of that.
957 case USB_REQ_GET_STATUS:
958 return gr_ep0_respond_u16(dev, 0x0000);
960 case USB_REQ_SET_FEATURE:
961 case USB_REQ_CLEAR_FEATURE:
/*
 * No possible valid standard requests. Still let gadget drivers
 * have a look at these.
 */
969 return 1; /* Delegate the rest */
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation to the gadget driver.
 *
 * Must be called with dev->lock held.
 */
978 static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
979 u16 value, u16 index)
984 u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
985 u8 is_in = index & USB_ENDPOINT_DIR_MASK;
987 if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
990 if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
993 ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
996 case USB_REQ_GET_STATUS:
997 halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
998 return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);
1000 case USB_REQ_SET_FEATURE:
1002 case USB_ENDPOINT_HALT:
1003 status = gr_ep_halt_wedge(ep, 1, 0, 1);
1005 status = gr_ep0_respond_empty(dev);
1010 case USB_REQ_CLEAR_FEATURE:
1012 case USB_ENDPOINT_HALT:
1015 status = gr_ep_halt_wedge(ep, 0, 0, 1);
1017 status = gr_ep0_respond_empty(dev);
1023 return 1; /* Delegate the rest */
1026 /* Must be called with dev->lock held */
1027 static void gr_ep0out_requeue(struct gr_udc *dev)
1029 int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
1032 dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
1037 * The main function dealing with setup requests on ep0.
1039 * Must be called with dev->lock held and irqs disabled
1041 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
1042 __releases(&dev->lock)
1043 __acquires(&dev->lock)
1046 struct usb_ctrlrequest ctrl;
1058 /* Restore from ep0 halt */
1059 if (dev->ep0state == GR_EP0_STALL) {
1060 gr_set_ep0state(dev, GR_EP0_SETUP);
1061 if (!req->req.actual)
1065 if (dev->ep0state == GR_EP0_ISTATUS) {
1066 gr_set_ep0state(dev, GR_EP0_SETUP);
1067 if (req->req.actual > 0)
1069 "Unexpected setup packet at state %s\n",
1070 gr_ep0state_string(GR_EP0_ISTATUS));
1072 goto out; /* Got expected ZLP */
1073 } else if (dev->ep0state != GR_EP0_SETUP) {
1075 "Unexpected ep0out request at state %s - stalling\n",
1076 gr_ep0state_string(dev->ep0state));
1077 gr_control_stall(dev);
1078 gr_set_ep0state(dev, GR_EP0_SETUP);
1080 } else if (!req->req.actual) {
1081 dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
1082 gr_ep0state_string(dev->ep0state));
1086 /* Handle SETUP packet */
1087 for (i = 0; i < req->req.actual; i++)
1088 u.raw[i] = ((u8 *)req->req.buf)[i];
1090 type = u.ctrl.bRequestType;
1091 request = u.ctrl.bRequest;
1092 value = le16_to_cpu(u.ctrl.wValue);
1093 index = le16_to_cpu(u.ctrl.wIndex);
1094 length = le16_to_cpu(u.ctrl.wLength);
1096 gr_dbgprint_devreq(dev, type, request, value, index, length);
1098 /* Check for data stage */
1100 if (type & USB_DIR_IN)
1101 gr_set_ep0state(dev, GR_EP0_IDATA);
1103 gr_set_ep0state(dev, GR_EP0_ODATA);
1106 status = 1; /* Positive status flags delegation */
1107 if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1108 switch (type & USB_RECIP_MASK) {
1109 case USB_RECIP_DEVICE:
1110 status = gr_device_request(dev, type, request,
1113 case USB_RECIP_ENDPOINT:
1114 status = gr_endpoint_request(dev, type, request,
1117 case USB_RECIP_INTERFACE:
1118 status = gr_interface_request(dev, type, request,
1125 spin_unlock(&dev->lock);
1127 dev_vdbg(dev->dev, "DELEGATE\n");
1128 status = dev->driver->setup(&dev->gadget, &u.ctrl);
1130 spin_lock(&dev->lock);
1133 /* Generate STALL on both ep0out and ep0in if requested */
1134 if (unlikely(status < 0)) {
1135 dev_vdbg(dev->dev, "STALL\n");
1136 gr_control_stall(dev);
1139 if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
1140 request == USB_REQ_SET_CONFIGURATION) {
1142 dev_dbg(dev->dev, "STATUS: deconfigured\n");
1143 usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
1144 } else if (status >= 0) {
/* Not configured unless the gadget OKs it */
1146 dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
1147 usb_gadget_set_state(&dev->gadget,
1148 USB_STATE_CONFIGURED);
1152 /* Get ready for next stage */
1153 if (dev->ep0state == GR_EP0_ODATA)
1154 gr_set_ep0state(dev, GR_EP0_OSTATUS);
1155 else if (dev->ep0state == GR_EP0_IDATA)
1156 gr_set_ep0state(dev, GR_EP0_ISTATUS);
1158 gr_set_ep0state(dev, GR_EP0_SETUP);
1161 gr_ep0out_requeue(dev);
1164 /* ---------------------------------------------------------------------- */
1165 /* VBUS and USB reset handling */
1167 /* Must be called with dev->lock held and irqs disabled */
1168 static void gr_vbus_connected(struct gr_udc *dev, u32 status)
1172 dev->gadget.speed = GR_SPEED(status);
1173 usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);
1175 /* Turn on full interrupts and pullup */
1176 control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
1177 GR_CONTROL_SP | GR_CONTROL_EP);
1178 gr_write32(&dev->regs->control, control);
1181 /* Must be called with dev->lock held */
1182 static void gr_enable_vbus_detect(struct gr_udc *dev)
1186 dev->irq_enabled = 1;
1187 wmb(); /* Make sure we do not ignore an interrupt */
1188 gr_write32(&dev->regs->control, GR_CONTROL_VI);
/* Take care of the case where we are already plugged in at this point */
1191 status = gr_read32(&dev->regs->status);
1192 if (status & GR_STATUS_VB)
1193 gr_vbus_connected(dev, status);
1196 /* Must be called with dev->lock held and irqs disabled */
1197 static void gr_vbus_disconnected(struct gr_udc *dev)
1199 gr_stop_activity(dev);
1201 /* Report disconnect */
1202 if (dev->driver && dev->driver->disconnect) {
1203 spin_unlock(&dev->lock);
1205 dev->driver->disconnect(&dev->gadget);
1207 spin_lock(&dev->lock);
1210 gr_enable_vbus_detect(dev);
1213 /* Must be called with dev->lock held and irqs disabled */
1214 static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
1216 gr_set_address(dev, 0);
1217 gr_set_ep0state(dev, GR_EP0_SETUP);
1218 usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
1219 dev->gadget.speed = GR_SPEED(status);
1221 gr_ep_nuke(&dev->epo[0]);
1222 gr_ep_nuke(&dev->epi[0]);
1223 dev->epo[0].stopped = 0;
1224 dev->epi[0].stopped = 0;
1225 gr_ep0out_requeue(dev);
1228 /* ---------------------------------------------------------------------- */
/*
 * Handles interrupts from IN endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
1236 static int gr_handle_in_ep(struct gr_ep *ep)
1238 struct gr_request *req;
1240 req = list_first_entry(&ep->queue, struct gr_request, queue);
1241 if (!req->last_desc)
1244 if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
1245 return 0; /* Not put in hardware buffers yet */
1247 if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
1248 return 0; /* Not transmitted yet, still in hardware buffers */
1250 /* Write complete */
1251 gr_dma_advance(ep, 0);
/*
 * Handles interrupts from OUT endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
1261 static int gr_handle_out_ep(struct gr_ep *ep)
1266 struct gr_request *req;
1267 struct gr_udc *dev = ep->dev;
1269 req = list_first_entry(&ep->queue, struct gr_request, queue);
1270 if (!req->curr_desc)
1273 ctrl = READ_ONCE(req->curr_desc->ctrl);
1274 if (ctrl & GR_DESC_OUT_CTRL_EN)
1275 return 0; /* Not received yet */
1278 len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
1279 req->req.actual += len;
1280 if (ctrl & GR_DESC_OUT_CTRL_SE)
1283 if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
1284 /* Short packet or >= expected size - we are done */
1286 if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
1288 * Send a status stage ZLP to ack the DATA stage in the
1289 * OUT direction. This needs to be done before
1290 * gr_dma_advance as that can lead to a call to
1291 * ep0_setup that can change dev->ep0state.
1293 gr_ep0_respond_empty(dev);
1294 gr_set_ep0state(dev, GR_EP0_SETUP);
1297 gr_dma_advance(ep, 0);
1299 /* Not done yet. Enable the next descriptor to receive more. */
1300 req->curr_desc = req->curr_desc->next_desc;
1301 req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
1303 ep_dmactrl = gr_read32(&ep->regs->dmactrl);
1304 gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
1311 * Handle state changes. Returns whether something was handled.
1313 * Must be called with dev->lock held and irqs disabled.
1315 static int gr_handle_state_changes(struct gr_udc *dev)
1317 u32 status = gr_read32(&dev->regs->status);
1319 int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
1320 dev->gadget.state == USB_STATE_ATTACHED);
1322 /* VBUS valid detected */
1323 if (!powstate && (status & GR_STATUS_VB)) {
1324 dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
1325 gr_vbus_connected(dev, status);
1330 if (powstate && !(status & GR_STATUS_VB)) {
1331 dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
1332 gr_vbus_disconnected(dev);
1336 /* USB reset detected */
1337 if (status & GR_STATUS_UR) {
1338 dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
1339 GR_SPEED_STR(status));
1340 gr_write32(&dev->regs->status, GR_STATUS_UR);
1341 gr_udc_usbreset(dev, status);
1346 if (dev->gadget.speed != GR_SPEED(status)) {
1347 dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
1348 GR_SPEED_STR(status));
1349 dev->gadget.speed = GR_SPEED(status);
1353 /* Going into suspend */
1354 if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
1355 dev_dbg(dev->dev, "STATUS: USB suspend\n");
1356 gr_set_ep0state(dev, GR_EP0_SUSPEND);
1357 dev->suspended_from = dev->gadget.state;
1358 usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);
1360 if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
1361 dev->driver && dev->driver->suspend) {
1362 spin_unlock(&dev->lock);
1364 dev->driver->suspend(&dev->gadget);
1366 spin_lock(&dev->lock);
1371 /* Coming out of suspend */
1372 if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
1373 dev_dbg(dev->dev, "STATUS: USB resume\n");
1374 if (dev->suspended_from == USB_STATE_POWERED)
1375 gr_set_ep0state(dev, GR_EP0_DISCONNECT);
1377 gr_set_ep0state(dev, GR_EP0_SETUP);
1378 usb_gadget_set_state(&dev->gadget, dev->suspended_from);
1380 if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
1381 dev->driver && dev->driver->resume) {
1382 spin_unlock(&dev->lock);
1384 dev->driver->resume(&dev->gadget);
1386 spin_lock(&dev->lock);
1394 /* Non-interrupt context irq handler */
1395 static irqreturn_t gr_irq_handler(int irq, void *_dev)
1397 struct gr_udc *dev = _dev;
1401 unsigned long flags;
1403 spin_lock_irqsave(&dev->lock, flags);
1405 if (!dev->irq_enabled)
/*
 * Check IN ep interrupts. We check these before the OUT eps because
 * some gadgets reuse a request that might still be outstanding and
 * needs to be completed first (mainly setup requests).
 */
1413 for (i = 0; i < dev->nepi; i++) {
1415 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1416 handled = gr_handle_in_ep(ep) || handled;
1419 /* Check OUT ep interrupts */
1420 for (i = 0; i < dev->nepo; i++) {
1422 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1423 handled = gr_handle_out_ep(ep) || handled;
1426 /* Check status interrupts */
1427 handled = gr_handle_state_changes(dev) || handled;
1430 * Check AMBA DMA errors. Only check if we didn't find anything else to
1431 * handle because this shouldn't happen if we did everything right.
1434 list_for_each_entry(ep, &dev->ep_list, ep_list) {
1435 if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
1437 "AMBA Error occurred for %s\n",
1445 spin_unlock_irqrestore(&dev->lock, flags);
1447 return handled ? IRQ_HANDLED : IRQ_NONE;
1450 /* Interrupt context irq handler */
1451 static irqreturn_t gr_irq(int irq, void *_dev)
1453 struct gr_udc *dev = _dev;
1455 if (!dev->irq_enabled)
1458 return IRQ_WAKE_THREAD;
1461 /* ---------------------------------------------------------------------- */
1464 /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
1465 static int gr_ep_enable(struct usb_ep *_ep,
1466 const struct usb_endpoint_descriptor *desc)
1473 u16 buffer_size = 0;
1476 ep = container_of(_ep, struct gr_ep, ep);
1477 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1482 /* 'ep0' IN and OUT are reserved */
1483 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1486 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1489 /* Make sure we are clear for enabling */
1490 epctrl = gr_read32(&ep->regs->epctrl);
1491 if (epctrl & GR_EPCTRL_EV)
1494 /* Check that directions match */
1495 if (!ep->is_in != !usb_endpoint_dir_in(desc))
1499 if ((!ep->is_in && ep->num >= dev->nepo) ||
1500 (ep->is_in && ep->num >= dev->nepi))
1503 if (usb_endpoint_xfer_control(desc)) {
1505 } else if (usb_endpoint_xfer_isoc(desc)) {
1507 } else if (usb_endpoint_xfer_bulk(desc)) {
1509 } else if (usb_endpoint_xfer_int(desc)) {
1512 dev_err(dev->dev, "Unknown transfer type for %s\n",
/*
 * Bits 10-0 set the max payload. 12-11 set the number of
 * additional transactions.
 */
1521 max = usb_endpoint_maxp(desc);
1522 nt = usb_endpoint_maxp_mult(desc) - 1;
1523 buffer_size = GR_BUFFER_SIZE(epctrl);
1524 if (nt && (mode == 0 || mode == 2)) {
1526 "%s mode: multiple trans./microframe not valid\n",
1527 (mode == 2 ? "Bulk" : "Control"));
1529 } else if (nt == 0x3) {
1531 "Invalid value 0x3 for additional trans./microframe\n");
1533 } else if ((nt + 1) * max > buffer_size) {
1534 dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
1535 buffer_size, (nt + 1), max);
1537 } else if (max == 0) {
1538 dev_err(dev->dev, "Max payload cannot be set to 0\n");
1540 } else if (max > ep->ep.maxpacket_limit) {
1541 dev_err(dev->dev, "Requested max payload %d > limit %d\n",
1542 max, ep->ep.maxpacket_limit);
1546 spin_lock(&ep->dev->lock);
1549 spin_unlock(&ep->dev->lock);
1556 ep->ep.maxpacket = max;
1562 * Maximum possible size of all payloads in one microframe
1563 * regardless of direction when using high-bandwidth mode.
1565 ep->bytes_per_buffer = (nt + 1) * max;
1566 } else if (ep->is_in) {
/*
 * The biggest multiple of maximum packet size that fits into
 * the buffer. The hardware will split up into many packets in
 * the IN direction.
 */
1572 ep->bytes_per_buffer = (buffer_size / max) * max;
/*
 * Only single packets will be placed in the buffers in the OUT
 * direction.
 */
1578 ep->bytes_per_buffer = max;
1581 epctrl = (max << GR_EPCTRL_MAXPL_POS)
1582 | (nt << GR_EPCTRL_NT_POS)
1583 | (mode << GR_EPCTRL_TT_POS)
1586 epctrl |= GR_EPCTRL_PI;
1587 gr_write32(&ep->regs->epctrl, epctrl);
1589 gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
1591 spin_unlock(&ep->dev->lock);
1593 dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
1594 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
1598 /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
1599 static int gr_ep_disable(struct usb_ep *_ep)
1603 unsigned long flags;
1605 ep = container_of(_ep, struct gr_ep, ep);
1606 if (!_ep || !ep->ep.desc)
1611 /* 'ep0' IN and OUT are reserved */
1612 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1615 if (dev->ep0state == GR_EP0_SUSPEND)
1618 dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
1620 spin_lock_irqsave(&dev->lock, flags);
1626 spin_unlock_irqrestore(&dev->lock, flags);
1632 * Frees a request, but not any DMA buffers associated with it
1633 * (gr_finish_request should already have taken care of that).
1635 static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
1637 struct gr_request *req;
1641 req = container_of(_req, struct gr_request, req);
1643 /* Leads to memory leak */
1644 WARN(!list_empty(&req->queue),
1645 "request not dequeued properly before freeing\n");
1650 /* Queue a request from the gadget */
1651 static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
1655 struct gr_request *req;
1659 if (unlikely(!_ep || !_req))
1662 ep = container_of(_ep, struct gr_ep, ep);
1663 req = container_of(_req, struct gr_request, req);
1666 spin_lock(&ep->dev->lock);
1669 * The ep0 pointer in the gadget struct is used both for ep0in and
1670 * ep0out. In a data stage in the out direction ep0out needs to be used
1671 * instead of the default ep0in. Completion functions might use
1672 * driver_data, so that needs to be copied as well.
1674 if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
1676 ep->ep.driver_data = dev->epi[0].ep.driver_data;
1680 gr_dbgprint_request("EXTERN", ep, req);
1682 ret = gr_queue(ep, req, GFP_ATOMIC);
1684 spin_unlock(&ep->dev->lock);
1689 /* Dequeue JUST ONE request */
1690 static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1692 struct gr_request *req;
1696 unsigned long flags;
1698 ep = container_of(_ep, struct gr_ep, ep);
1699 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
1705 /* We can't touch (DMA) registers when suspended */
1706 if (dev->ep0state == GR_EP0_SUSPEND)
1709 spin_lock_irqsave(&dev->lock, flags);
1711 /* Make sure it's actually queued on this endpoint */
1712 list_for_each_entry(req, &ep->queue, queue) {
1713 if (&req->req == _req)
1716 if (&req->req != _req) {
1721 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
1722 /* This request is currently being processed */
1725 gr_finish_request(ep, req, -ECONNRESET);
1727 gr_dma_advance(ep, -ECONNRESET);
1728 } else if (!list_empty(&req->queue)) {
1729 /* Not being processed - gr_finish_request dequeues it */
1730 gr_finish_request(ep, req, -ECONNRESET);
1736 spin_unlock_irqrestore(&dev->lock, flags);
1741 /* Helper for gr_set_halt and gr_set_wedge */
1742 static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
1749 ep = container_of(_ep, struct gr_ep, ep);
1751 spin_lock(&ep->dev->lock);
1753 /* Halting an IN endpoint should fail if queue is not empty */
1754 if (halt && ep->is_in && !list_empty(&ep->queue)) {
1759 ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
1762 spin_unlock(&ep->dev->lock);
1768 static int gr_set_halt(struct usb_ep *_ep, int halt)
1770 return gr_set_halt_wedge(_ep, halt, 0);
1773 /* Halt and wedge endpoint */
1774 static int gr_set_wedge(struct usb_ep *_ep)
1776 return gr_set_halt_wedge(_ep, 1, 1);
/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
1783 static int gr_fifo_status(struct usb_ep *_ep)
1791 ep = container_of(_ep, struct gr_ep, ep);
1793 epstat = gr_read32(&ep->regs->epstat);
1795 if (epstat & GR_EPSTAT_B0)
1796 bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
1797 if (epstat & GR_EPSTAT_B1)
1798 bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
1804 /* Empty data from internal buffers of an endpoint. */
1805 static void gr_fifo_flush(struct usb_ep *_ep)
1812 ep = container_of(_ep, struct gr_ep, ep);
1813 dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
1815 spin_lock(&ep->dev->lock);
1817 epctrl = gr_read32(&ep->regs->epctrl);
1818 epctrl |= GR_EPCTRL_CB;
1819 gr_write32(&ep->regs->epctrl, epctrl);
1821 spin_unlock(&ep->dev->lock);
1824 static const struct usb_ep_ops gr_ep_ops = {
1825 .enable = gr_ep_enable,
1826 .disable = gr_ep_disable,
1828 .alloc_request = gr_alloc_request,
1829 .free_request = gr_free_request,
1831 .queue = gr_queue_ext,
1832 .dequeue = gr_dequeue,
1834 .set_halt = gr_set_halt,
1835 .set_wedge = gr_set_wedge,
1836 .fifo_status = gr_fifo_status,
1837 .fifo_flush = gr_fifo_flush,
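/*
 * Note: these callbacks are reached indirectly when function drivers call the
 * generic usb_ep_*() wrappers, e.g. usb_ep_queue() ends up in gr_queue_ext().
 */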
1840 /* ---------------------------------------------------------------------- */
1841 /* USB Gadget ops */
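/* Returns the current frame number from the hardware status register */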
1843 static int gr_get_frame(struct usb_gadget *_gadget)
1849 dev = container_of(_gadget, struct gr_udc, gadget);
1850 return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
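/* Initiates remote wakeup signalling, provided the host has enabled the feature */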
1853 static int gr_wakeup(struct usb_gadget *_gadget)
1859 dev = container_of(_gadget, struct gr_udc, gadget);
/* Remote wakeup feature not enabled by host */
1862 if (!dev->remote_wakeup)
1865 spin_lock(&dev->lock);
1867 gr_write32(&dev->regs->control,
1868 gr_read32(&dev->regs->control) | GR_CONTROL_RW);
1870 spin_unlock(&dev->lock);
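/* Connects or disconnects the D+ pullup, making the device (in)visible on the bus */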
1875 static int gr_pullup(struct usb_gadget *_gadget, int is_on)
1882 dev = container_of(_gadget, struct gr_udc, gadget);
1884 spin_lock(&dev->lock);
1886 control = gr_read32(&dev->regs->control);
1888 control |= GR_CONTROL_EP;
1890 control &= ~GR_CONTROL_EP;
1891 gr_write32(&dev->regs->control, control);
1893 spin_unlock(&dev->lock);
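/* udc_start callback: hooks up the gadget driver and enables VBUS detection */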
1898 static int gr_udc_start(struct usb_gadget *gadget,
1899 struct usb_gadget_driver *driver)
1901 struct gr_udc *dev = to_gr_udc(gadget);
1903 spin_lock(&dev->lock);
1905 /* Hook up the driver */
1906 driver->driver.bus = NULL;
1907 dev->driver = driver;
1909 /* Get ready for host detection */
1910 gr_enable_vbus_detect(dev);
1912 spin_unlock(&dev->lock);
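/* udc_stop callback: unbinds the gadget driver and stops all device activity */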
1917 static int gr_udc_stop(struct usb_gadget *gadget)
1919 struct gr_udc *dev = to_gr_udc(gadget);
1920 unsigned long flags;
1922 spin_lock_irqsave(&dev->lock, flags);
1925 gr_stop_activity(dev);
1927 spin_unlock_irqrestore(&dev->lock, flags);
1932 static const struct usb_gadget_ops gr_ops = {
1933 .get_frame = gr_get_frame,
1934 .wakeup = gr_wakeup,
1935 .pullup = gr_pullup,
1936 .udc_start = gr_udc_start,
1937 .udc_stop = gr_udc_stop,
1938 /* Other operations not supported */
1941 /* ---------------------------------------------------------------------- */
1942 /* Module probe, removal and of-matching */
1944 static const char * const onames[] = {
1945 "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
1946 "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
1947 "ep12out", "ep13out", "ep14out", "ep15out"
1950 static const char * const inames[] = {
1951 "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
1952 "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
1953 "ep12in", "ep13in", "ep14in", "ep15in"
1956 /* Must be called with dev->lock held */
1957 static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
1960 struct gr_request *req;
1961 struct usb_request *_req;
1965 ep = &dev->epi[num];
1966 ep->ep.name = inames[num];
1967 ep->regs = &dev->regs->epi[num];
1969 ep = &dev->epo[num];
1970 ep->ep.name = onames[num];
1971 ep->regs = &dev->regs->epo[num];
1978 ep->ep.ops = &gr_ep_ops;
1979 INIT_LIST_HEAD(&ep->queue);
1982 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
1986 buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
1988 gr_free_request(&ep->ep, _req);
1992 req = container_of(_req, struct gr_request, req);
1994 req->req.length = MAX_CTRL_PL_SIZE;
1997 dev->ep0reqi = req; /* Complete gets set as used */
1999 dev->ep0reqo = req; /* Completion treated separately */
2001 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
2002 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
2004 ep->ep.caps.type_control = true;
2006 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
2007 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2009 ep->ep.caps.type_iso = true;
2010 ep->ep.caps.type_bulk = true;
2011 ep->ep.caps.type_int = true;
2013 list_add_tail(&ep->ep_list, &dev->ep_list);
2016 ep->ep.caps.dir_in = true;
2018 ep->ep.caps.dir_out = true;
2020 ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
2021 &ep->tailbuf_paddr, GFP_ATOMIC);
2028 /* Must be called with dev->lock held */
2029 static int gr_udc_init(struct gr_udc *dev)
2031 struct device_node *np = dev->dev->of_node;
2038 gr_set_address(dev, 0);
2040 INIT_LIST_HEAD(&dev->gadget.ep_list);
2041 dev->gadget.speed = USB_SPEED_UNKNOWN;
2042 dev->gadget.ep0 = &dev->epi[0].ep;
2044 INIT_LIST_HEAD(&dev->ep_list);
2045 gr_set_ep0state(dev, GR_EP0_DISCONNECT);
2047 for (i = 0; i < dev->nepo; i++) {
2048 if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
2050 ret = gr_ep_init(dev, i, 0, bufsize);
2055 for (i = 0; i < dev->nepi; i++) {
2056 if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
2058 ret = gr_ep_init(dev, i, 1, bufsize);
2063 /* Must be disabled by default */
2064 dev->remote_wakeup = 0;
2066 /* Enable ep0out and ep0in */
2067 epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
2068 dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
2069 gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
2070 gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
2071 gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
2072 gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
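/* Frees the per-endpoint bounce buffer allocated in gr_ep_init() */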
2077 static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
2082 ep = &dev->epi[num];
2084 ep = &dev->epo[num];
2087 dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
2088 ep->tailbuf, ep->tailbuf_paddr);
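/* Platform driver remove callback: unregisters the gadget and releases resources */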
2091 static int gr_remove(struct platform_device *pdev)
2093 struct gr_udc *dev = platform_get_drvdata(pdev);
2097 usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
2102 dma_pool_destroy(dev->desc_pool);
2103 platform_set_drvdata(pdev, NULL);
2105 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
2106 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
2108 for (i = 0; i < dev->nepo; i++)
2109 gr_ep_remove(dev, i, 0);
2110 for (i = 0; i < dev->nepi; i++)
2111 gr_ep_remove(dev, i, 1);
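/* Registers a threaded, shared interrupt handler for the given irq line */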
2115 static int gr_request_irq(struct gr_udc *dev, int irq)
2117 return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
2118 IRQF_SHARED, driver_name, dev);
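/* Platform driver probe callback: maps registers, initializes endpoints and irqs */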
2121 static int gr_probe(struct platform_device *pdev)
2124 struct resource *res;
2125 struct gr_regs __iomem *regs;
2129 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2132 dev->dev = &pdev->dev;
2134 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2135 regs = devm_ioremap_resource(dev->dev, res);
2137 return PTR_ERR(regs);
2139 dev->irq = platform_get_irq(pdev, 0);
2140 if (dev->irq <= 0) {
2141 dev_err(dev->dev, "No irq found\n");
/* Some core configurations have separate irqs for IN and OUT events */
2146 dev->irqi = platform_get_irq(pdev, 1);
2147 if (dev->irqi > 0) {
2148 dev->irqo = platform_get_irq(pdev, 2);
2149 if (dev->irqo <= 0) {
2150 dev_err(dev->dev, "Found irqi but not irqo\n");
2157 dev->gadget.name = driver_name;
2158 dev->gadget.max_speed = USB_SPEED_HIGH;
2159 dev->gadget.ops = &gr_ops;
2161 spin_lock_init(&dev->lock);
2164 platform_set_drvdata(pdev, dev);
2166 /* Determine number of endpoints and data interface mode */
2167 status = gr_read32(&dev->regs->status);
2168 dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
2169 dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
2171 if (!(status & GR_STATUS_DM)) {
2172 dev_err(dev->dev, "Slave mode cores are not supported\n");
2176 /* --- Effects of the following calls might need explicit cleanup --- */
2178 /* Create DMA pool for descriptors */
2179 dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
2180 sizeof(struct gr_dma_desc), 4, 0);
2181 if (!dev->desc_pool) {
2182 dev_err(dev->dev, "Could not allocate DMA pool");
2186 /* Inside lock so that no gadget can use this udc until probe is done */
2187 retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
2189 dev_err(dev->dev, "Could not add gadget udc");
2194 spin_lock(&dev->lock);
2196 retval = gr_udc_init(dev);
2198 spin_unlock(&dev->lock);
2202 /* Clear all interrupt enables that might be left on since last boot */
2203 gr_disable_interrupts_and_pullup(dev);
2205 spin_unlock(&dev->lock);
2209 retval = gr_request_irq(dev, dev->irq);
2211 dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
2216 retval = gr_request_irq(dev, dev->irqi);
2218 dev_err(dev->dev, "Failed to request irqi %d\n",
2222 retval = gr_request_irq(dev, dev->irqo);
2224 dev_err(dev->dev, "Failed to request irqo %d\n",
2231 dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
2232 dev->irq, dev->irqi, dev->irqo);
2234 dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
2243 static const struct of_device_id gr_match[] = {
2244 {.name = "GAISLER_USBDC"},
2248 MODULE_DEVICE_TABLE(of, gr_match);
2250 static struct platform_driver gr_driver = {
2252 .name = DRIVER_NAME,
2253 .of_match_table = gr_match,
2256 .remove = gr_remove,
2258 module_platform_driver(gr_driver);
2260 MODULE_AUTHOR("Aeroflex Gaisler AB.");
2261 MODULE_DESCRIPTION(DRIVER_DESC);
2262 MODULE_LICENSE("GPL");