/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   is fine; but (b) ping _from_ davinci, even "ping -c 1", loses ICMP RX
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}
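/*
 * NOTE: this assumes the hcd allocation path stored the struct musb pointer
 * in hcd->hcd_priv when the hcd was created, e.g. (sketch, not verbatim):
 *
 *	*(struct musb **) hcd->hcd_priv = musb;
 *
 * hcd_to_musb() is simply the read side of that convention.
 */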
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the TX fifo flush fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, ensure TX urb(s) are queued when
		 * unplugging the USB device connected to the AM335x USB
		 * host port.
		 *
		 * Using a USB-ethernet device and running iperf (client on
		 * AM335x) has a very high chance of triggering it.
		 *
		 * Better to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the TX channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		/* endpoint 0 */
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
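/*
 * The lock is dropped around usb_hcd_giveback_urb() because the URB's
 * completion handler may run synchronously and is allowed to resubmit or
 * unlink URBs; those paths re-enter this driver and take musb->lock again,
 * so holding it across the callback would deadlock.
 */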
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	u16		csr;
	void __iomem	*epio = qh->hw_ep->regs;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
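/*
 * The hardware data toggle lives in the endpoint CSR, but the hw_ep may be
 * reprogrammed for a different device/endpoint before this qh runs again.
 * So the toggle is mirrored into the usb core here, and written back with
 * the H_WR_DATATOGGLE mechanism when the endpoint is reprogrammed.
 */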
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* else: fall through */

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
			    qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
					struct musb_hw_ep *hw_ep,
					struct musb_qh *qh,
					struct urb *urb, u32 offset,
					u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	0	Yes(HS bulk)
		 *	1	>0	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}
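/*
 * Mentor DMA mode summary (as used above): mode 0 moves at most one packet
 * per request and the CPU must set TXPKTRDY for each packet; mode 1 streams
 * a multi-packet buffer, with AUTOSET letting the controller raise TXPKTRDY
 * itself whenever the FIFO fills with a full-size packet.
 */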
static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting the
	 * DMA transfer.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
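/*
 * Returns true iff the channel was successfully programmed; on failure the
 * channel is released and TXCSR is scrubbed, so the caller can (and does)
 * fall back to loading the FIFO with PIO.
 */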
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) &&
		    dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}
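/*
 * This is the "minimal traffic scheduling to avoid bulk RX packet
 * starvation" mentioned in the status notes at the top of this file:
 * multiplexed bulk qhs are rotated round-robin whenever one of them
 * NAKs past the programmed NAK limit.
 */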
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);

			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;
	int res;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
	      (u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	res = dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);

	return res;
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
#endif

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |	else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
#endif
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	struct dma_controller	*c = musb->dma_controller;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	trace_musb_urb_rx(musb, urb);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		musb_dbg(musb, "RX end %d STALL", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		musb_dbg(musb, "end %d RX proto error", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

		rx_csr &= ~MUSB_RXCSR_H_ERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			musb_dbg(musb, "RX end %d NAK timeout", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			musb_dbg(musb, "RX end %d ISO data error", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}

	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) {
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
			musb_dbg(hw_ep->musb,
				"ep %d dma %s, rxcsr %04x, rxcount %d",
				epnum, done ? "off" : "reset",
				musb_readw(epio, MUSB_RXCSR),
				musb_readw(epio, MUSB_RXCOUNT));
		} else {
			done = true;
		}

	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) && dma) {
			musb_dbg(hw_ep->musb,
				"RX%d count %d, buffer 0x%llx len %d/%d",
				epnum, musb_readw(epio, MUSB_RXCOUNT),
				(unsigned long long) urb->transfer_dma
				+ urb->actual_length,
				qh->offset,
				urb->transfer_buffer_length);

			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len, iso_err))
				goto finish;
			else
				dev_err(musb->controller, "error: rx_dma failed\n");
		}

		if (!dma) {
			unsigned int received_len;

			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/*
			 * We need to map sg if the transfer_buffer is
			 * NULL.
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* Calculate the number of bytes received */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			musb_dbg(musb, "read %spacket", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg) {
			qh->use_sg = false;
			urb->transfer_buffer = NULL;
		}

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle = 0;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed. This scheme does not work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
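		/*
		 * NOTE: the NAK limit register value is log2-encoded: a
		 * value m means 2^(m-1) (micro)frames, so 8 => 128 uframes
		 * (16 ms at high speed) and 4 => 8 frames (8 ms at full
		 * speed), matching the arithmetic in the comment above.
		 */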
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
2166 static int musb_urb_enqueue(
2167 struct usb_hcd *hcd,
2171 unsigned long flags;
2172 struct musb *musb = hcd_to_musb(hcd);
2173 struct usb_host_endpoint *hep = urb->ep;
2175 struct usb_endpoint_descriptor *epd = &hep->desc;
2180 /* host role must be active */
2181 if (!is_host_active(musb) || !musb->is_active)
2184 trace_musb_urb_enq(musb, urb);
2186 spin_lock_irqsave(&musb->lock, flags);
2187 ret = usb_hcd_link_urb_to_ep(hcd, urb);
2188 qh = ret ? NULL : hep->hcpriv;
2191 spin_unlock_irqrestore(&musb->lock, flags);
2193 /* DMA mapping was already done, if needed, and this urb is on
2194 * hep->urb_list now ... so we're done, unless hep wasn't yet
2195 * scheduled onto a live qh.
2197 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2198 * disabled, testing for empty qh->ring and avoiding qh setup costs
2199 * except for the first urb queued after a config change.
2204 /* Allocate and initialize qh, minimizing the work done each time
2205 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
2207 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2208 * for bugs in other kernel code to break this driver...
2210 qh = kzalloc(sizeof *qh, mem_flags);
2212 spin_lock_irqsave(&musb->lock, flags);
2213 usb_hcd_unlink_urb_from_ep(hcd, urb);
2214 spin_unlock_irqrestore(&musb->lock, flags);
2220 INIT_LIST_HEAD(&qh->ring);
2223 qh->maxpacket = usb_endpoint_maxp(epd);
2224 qh->type = usb_endpoint_type(epd);
2226 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2227 * Some musb cores don't support high bandwidth ISO transfers; and
2228 * we don't (yet!) support high bandwidth interrupt transfers.
2230 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
2231 if (qh->hb_mult > 1) {
2232 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2235 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2236 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2241 qh->maxpacket &= 0x7ff;
	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;
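	/*
	 * Worked example (illustrative), assuming the usual MUSB
	 * TXTYPE/RXTYPE layout (speed in bits 7:6, protocol in bits 5:4,
	 * target endpoint in bits 3:0): a bulk transfer to endpoint 1 of
	 * a high speed device yields (2 << 4) | 1 = 0x21, then |= 0x40
	 * for high speed, so 0x61 is what later reaches the type register.
	 */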
	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
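	/*
	 * Worked example (illustrative): a full speed interrupt endpoint
	 * with bInterval = 10 keeps the linear value 10 (one poll every
	 * 10 frames, i.e. 10 ms), while a high speed or ISO endpoint with
	 * bInterval = 4 keeps the logarithmic value 4, i.e. one poll
	 * every 2^(4-1) = 8 (micro)frames.
	 */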
	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}
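	/*
	 * Note (assumption, for illustration): setting bit 7 of the hub
	 * address marks the hub as having one transaction translator per
	 * port, per the usual MUSB TXHUBADDR/RXHUBADDR layout where that
	 * bit selects multi-TT operation for the port in h_port_reg.
	 */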
	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with the controller locked, irqs blocked.
 * The hardware queue advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;
	struct dma_channel	*dma = NULL;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* clear the endpoint's irq status here to avoid bogus irqs */
		if (is_dma_capable() && dma)
			musb_platform_clear_ep_rxintr(musb, ep->epnum);
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in = usb_pipein(urb->pipe);
	int			ret;

	trace_musb_urb_deq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort the current transfer, pending DMA, etc.;
	 * urb->status has already been updated.  This is a synchronous
	 * abort; it'd be OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when an (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;
	int		ret;

	ret = musb_port_suspend(musb, true);
	if (ret)
		return ret;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->otg->state));
		return -EBUSY;
	} else
		return 0;
}
static int musb_bus_resume(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	if (musb->config &&
	    musb->config->host_port_deassert_reset_at_resume)
		musb_port_reset(musb, false);

	return 0;
}
#ifndef CONFIG_MUSB_PIO_ONLY

#define MUSB_USB_DMA_ALIGN 4

struct musb_temp_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[];
};
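/*
 * Illustrative layout sketch (not from the original source): the raw
 * allocation below is over-sized by MUSB_USB_DMA_ALIGN - 1 bytes so that
 * PTR_ALIGN() can slide the struct forward until it (and hence data[],
 * which follows two pointer-sized fields) is 4 byte aligned:
 *
 *   kmalloc_ptr -> [pad 0..3][kmalloc_ptr][old_xfer_buffer][data ...]
 *                            ^temp                         ^temp->data
 */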
static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		memcpy(temp->old_xfer_buffer, temp->data, length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct musb_temp_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);

	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct musb	*musb = hcd_to_musb(hcd);
	int ret;

	/*
	 * The DMA engine in RTL1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4 byte boundary.
	 * For such engines we implement the (un)map_urb_for_dma hooks.
	 * Do not use these hooks for RTL<1.8.
	 */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);

	ret = musb_alloc_temp_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		musb_free_temp_buffer(urb);

	return ret;
}
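/*
 * Note (illustrative): usbcore invokes map_urb_for_dma() as part of urb
 * submission and unmap_urb_for_dma() on completion or unlink, so a bounce
 * buffer allocated above lives for exactly one urb round trip and is
 * always released via musb_unmap_urb_for_dma() below.
 */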
static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb	*musb = hcd_to_musb(hcd);

	usb_hcd_unmap_urb_for_dma(hcd, urb);

	/* Do not use this hook for RTL<1.8 (see description above) */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return;

	musb_free_temp_buffer(urb);
}
#endif /* !CONFIG_MUSB_PIO_ONLY */
static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};
int musb_host_alloc(struct musb *musb)
{
	struct device	*dev = musb->controller;

	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
	if (!musb->hcd)
		return -EINVAL;

	*musb->hcd->hcd_priv = (unsigned long) musb;
	musb->hcd->self.uses_pio_for_control = 1;
	musb->hcd->uses_new_polling = 1;
	musb->hcd->has_tt = 1;

	return 0;
}
void musb_host_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PORT_MODE_GADGET)
		return;
	usb_remove_hcd(musb->hcd);
}

void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}
int musb_host_setup(struct musb *musb, int power_budget)
{
	int ret;
	struct usb_hcd *hcd = musb->hcd;

	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->default_a = 1;
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	}
	otg_set_host(musb->xceiv->otg, &hcd->self);
	hcd->self.otg_port = 1;
	musb->xceiv->otg->host = &hcd->self;
	hcd->power_budget = 2 * (power_budget ? : 250);
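	/*
	 * Note (assumption, for illustration): power_budget is taken to
	 * arrive in 2 mA units (the bMaxPower convention), so the default
	 * of 250 used when 0 is passed becomes a 500 mA budget for the
	 * root port in usbcore's mA terms.
	 */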
	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	device_wakeup_enable(hcd->self.controller);
	return 0;
}
void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}

void musb_host_poke_root_hub(struct musb *musb)
{
	MUSB_HST_MODE(musb);
	if (musb->hcd->status_urb)
		usb_hcd_poll_rh_status(musb->hcd);
	else
		usb_hcd_resume_root_hub(musb->hcd);
}