/*
 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>

#include "musb_core.h"
#include "tusb6010.h"

#define to_chdat(c)	((struct tusb_omap_dma_ch *)(c)->private_data)

#define MAX_DMAREQ	5	/* REVISIT: Really 6, but req5 not OK */
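
/*
 * The helpers below treat TUSB_DMA_EP_MAP as one 5-bit slot per dmareq
 * line: the endpoint number sits in bits [3:0] of each slot and bit 4 is
 * set for TX (layout inferred from the accessors in this file).
 */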

struct tusb_dma_data {
	s8			dmareq;
	struct dma_chan		*chan;
};

struct tusb_omap_dma_ch {
	struct musb		*musb;
	void __iomem		*tbase;
	unsigned long		phys_offset;
	int			epnum;
	u8			tx;
	struct musb_hw_ep	*hw_ep;

	struct tusb_dma_data	*dma_data;

	struct tusb_omap_dma	*tusb_dma;

	dma_addr_t		dma_addr;

	u32			len;
	u16			packet_sz;
	u16			transfer_packet_sz;
	u32			transfer_len;
	u32			completed_len;
};

struct tusb_omap_dma {
	struct dma_controller		controller;
	void __iomem			*tbase;

	struct tusb_dma_data		dma_pool[MAX_DMAREQ];
	unsigned			multichannel:1;
};
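
/*
 * dma_pool holds the dmaengine channels backing the TUSB dmareq lines:
 * one per dmareq in multichannel mode, otherwise only dmareq0 is
 * requested and shared between endpoints (see tusb_omap_allocate_dma_pool()
 * and tusb_omap_use_shared_dmareq() below).
 */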

/*
 * Allocate dmareq0 to the current channel unless it's already taken
 */
static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if (reg != 0) {
		dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
			chdat->epnum, reg & 0xf);
		return -EAGAIN;
	}

	if (chdat->tx)
		reg = (1 << 4) | chdat->epnum;
	else
		reg = chdat->epnum;

	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	return 0;
}

static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if ((reg & 0xf) != chdat->epnum) {
		printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
			chdat->epnum, reg & 0xf);
		return;
	}
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
}

/*
 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
 * musb_gadget.c.
 */
static void tusb_omap_dma_cb(void *data)
{
	struct dma_channel *channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
	struct musb *musb = chdat->musb;
	struct device *dev = musb->controller;
	struct musb_hw_ep *hw_ep = chdat->hw_ep;
	void __iomem *ep_conf = hw_ep->conf;
	void __iomem *mbase = musb->mregs;
	unsigned long remaining, flags, pio;

	spin_lock_irqsave(&musb->lock, flags);

	dev_dbg(musb->controller, "ep%i %s dma callback\n",
		chdat->epnum, chdat->tx ? "tx" : "rx");
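
	/*
	 * XFR_SIZE is programmed with the transfer length and whatever is
	 * left in it at callback time is the untransferred residue, so the
	 * completed length is transfer_len minus that residue.
	 */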

	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8 *buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
					 chdat->transfer_len,
					 DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
					 chdat->transfer_len,
					 DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/* We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16 csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}

static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
	struct musb *musb = chdat->musb;
	struct device *dev = musb->controller;
	struct musb_hw_ep *hw_ep = chdat->hw_ep;
	void __iomem *mbase = musb->mregs;
	void __iomem *ep_conf = hw_ep->conf;
	dma_addr_t fifo_addr = hw_ep->fifo_sync;
	u32 dma_remaining;
	u16 csr;
	u32 psize;
	struct tusb_dma_data *dma_data;
	struct dma_async_tx_descriptor *dma_desc;
	struct dma_slave_config dma_cfg;
	enum dma_transfer_direction dma_dir;
	u32 port_window;
	int ret;

	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
			chdat->tx ? "tx" : "rx", dma_remaining);
		return false;
	}

	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	dma_data = chdat->dma_data;
	if (!tusb_dma->multichannel) {
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (dma_data->dmareq < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx) {
		dma_dir = DMA_MEM_TO_DEV;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_TO_DEVICE);
	} else {
		dma_dir = DMA_DEV_TO_MEM;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_FROM_DEVICE);
	}

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		port_window = 8;
	} else {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		port_window = 16;

		fifo_addr = hw_ep->fifo_async;
	}

	dev_dbg(musb->controller,
		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);
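
	/*
	 * Both src and dst are set to the FIFO address; dma_dir picks which
	 * side of the slave transfer the FIFO actually is. port_window and
	 * maxburst describe the TUSB FIFO window selected above.
	 */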
	dma_cfg.src_addr = fifo_addr;
	dma_cfg.dst_addr = fifo_addr;
	dma_cfg.src_port_window_size = port_window;
	dma_cfg.src_maxburst = port_window;
	dma_cfg.dst_port_window_size = port_window;
	dma_cfg.dst_maxburst = port_window;

	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
	if (ret) {
		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
		return false;
	}

	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
					chdat->transfer_len, dma_dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(musb->controller, "DMA prep_slave_single failed\n");
		return false;
	}

	dma_desc->callback = tusb_omap_dma_cb;
	dma_desc->callback_param = channel;
	dmaengine_submit(dma_desc);

	dev_dbg(musb->controller,
		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		dma_cfg.src_addr_width * 8,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	musb_ep_select(mbase, chdat->epnum);
	if (chdat->tx) {
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/* Start DMA transfer */
	dma_async_issue_pending(dma_data->chan);
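
	/*
	 * Program the TUSB endpoint last: packet size goes into
	 * EP_MAX_PACKET_SIZE (TX in the low half-word, RX in the high
	 * half-word) and the total DMA length into XFR_SIZE, which the
	 * callback later reads back as the residue.
	 */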
	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~0x7ff;
		psize |= chdat->transfer_packet_sz;
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~(0x7ff << 16);
		psize |= (chdat->transfer_packet_sz << 16);
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}

static int tusb_omap_dma_abort(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);

	if (chdat->dma_data)
		dmaengine_terminate_all(chdat->dma_data->chan);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}
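
/*
 * In multichannel mode each endpoint gets its own dmareq line: scan
 * TUSB_DMA_EP_MAP for a free 5-bit slot and claim it for this endpoint.
 */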
static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	int i, dmareq_nr = -1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		int cur = (reg & (0xf << (i * 5))) >> (i * 5);

		if (cur == 0) {
			dmareq_nr = i;
			break;
		}
	}

	if (dmareq_nr == -1)
		return -EAGAIN;

	reg |= (chdat->epnum << (dmareq_nr * 5));
	if (chdat->tx)
		reg |= ((1 << 4) << (dmareq_nr * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];

	return 0;
}

static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg;

	if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
		return;

	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = NULL;
}

static struct dma_channel *dma_channel_pool[MAX_DMAREQ];

static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		struct musb_hw_ep *hw_ep,
		u8 tx)
{
	int ret, i;
	struct tusb_omap_dma *tusb_dma;
	struct musb *musb;
	struct dma_channel *channel = NULL;
	struct tusb_omap_dma_ch *chdat = NULL;
	struct tusb_dma_data *dma_data = NULL;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->controller.musb;

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	chdat->musb = tusb_dma->controller.musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;
	if (tx)
		chdat->tx = 1;
	else
		chdat->tx = 0;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (!chdat->dma_data) {
		if (tusb_dma->multichannel) {
			ret = tusb_omap_dma_allocate_dmareq(chdat);
			if (ret != 0)
				goto free_dmareq;
		} else {
			chdat->dma_data = &tusb_dma->dma_pool[0];
		}
	}

	dma_data = chdat->dma_data;

	dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		tusb_dma->multichannel ? "shared" : "dedicated",
		dma_data->dmareq);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}

static void tusb_omap_dma_release(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct musb *musb = chdat->musb;

	dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);

	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	dmaengine_terminate_sync(chdat->dma_data->chan);
	tusb_omap_dma_free_dmareq(chdat);
}

void tusb_dma_controller_destroy(struct dma_controller *c)
{
	struct tusb_omap_dma *tusb_dma;
	int i;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch) {
			kfree(ch->private_data);
			kfree(ch);
		}

		/* Free up the DMA channels */
		if (tusb_dma && tusb_dma->dma_pool[i].chan)
			dma_release_channel(tusb_dma->dma_pool[i].chan);
	}

	kfree(tusb_dma);
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);

static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
{
	struct musb *musb = tusb_dma->controller.musb;
	int i;
	int ret = 0;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		/*
		 * Request DMA channels:
		 * - one channel in case of non multichannel mode
		 * - MAX_DMAREQ number of channels in multichannel mode
		 */
		if (i == 0 || tusb_dma->multichannel) {
			char ch_name[8];

			sprintf(ch_name, "dmareq%d", i);
			dma_data->chan = dma_request_chan(musb->controller,
							  ch_name);
			if (IS_ERR(dma_data->chan)) {
				dev_err(musb->controller,
					"Failed to request %s\n", ch_name);
				ret = PTR_ERR(dma_data->chan);
				goto dma_error;
			}

			dma_data->dmareq = i;
		} else {
			dma_data->dmareq = -1;
		}
	}

	return 0;

dma_error:
	for (; i >= 0; i--) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		if (dma_data->dmareq >= 0)
			dma_release_channel(dma_data->chan);
	}

	return ret;
}
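
/*
 * Sketch of how the tusb6010 glue is expected to wire these entry points
 * up, assuming the usual musb_platform_ops hooks (illustrative only):
 *
 *	static const struct musb_platform_ops tusb_ops = {
 *		...
 *		.dma_init	= tusb_dma_controller_create,
 *		.dma_exit	= tusb_dma_controller_destroy,
 *	};
 */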
struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base)
{
	void __iomem *tbase = musb->ctrl_base;
	struct tusb_omap_dma *tusb_dma;
	int i;

	/* REVISIT: Get dmareq lines used from board-*.c */

	musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);

	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2)
		| TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
		| TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));

	tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
	if (!tusb_dma)
		goto out;

	tusb_dma->controller.musb = musb;
	tusb_dma->tbase = musb->ctrl_base;

	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
	tusb_dma->controller.channel_release = tusb_omap_dma_release;
	tusb_dma->controller.channel_program = tusb_omap_dma_program;
	tusb_dma->controller.channel_abort = tusb_omap_dma_abort;

	if (musb->tusb_revision >= TUSB_REV_30)
		tusb_dma->multichannel = 1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch;
		struct tusb_omap_dma_ch *chdat;

		ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
		if (!ch)
			goto cleanup;

		dma_channel_pool[i] = ch;

		chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
		if (!chdat)
			goto cleanup;

		ch->status = MUSB_DMA_STATUS_UNKNOWN;
		ch->private_data = chdat;
	}

	if (tusb_omap_allocate_dma_pool(tusb_dma))
		goto cleanup;

	return &tusb_dma->controller;

cleanup:
	musb_dma_controller_destroy(&tusb_dma->controller);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_create);