// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY		0
#define DW_SPI_RX_BURST_LEVEL	16
#define DW_SPI_TX_BUSY		1
#define DW_SPI_TX_BURST_LEVEL	16
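
/*
 * DW_SPI_RX_BUSY and DW_SPI_TX_BUSY are bit numbers within
 * dws->dma_chan_busy: each bit is set when the corresponding DMA transfer
 * is submitted and cleared from that channel's completion callback, so a
 * duplex transfer is considered finished only once both bits are zero again.
 */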

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_TX_BURST_LEVEL;

	/*
	 * Having a Rx DMA channel serviced with higher priority than a Tx DMA
	 * channel might not be enough to provide a well balanced DMA-based
	 * SPI transfer interface. There might still be moments when the Tx DMA
	 * channel is occasionally handled faster than the Rx DMA channel.
	 * That in turn will eventually cause the SPI Rx FIFO to overflow if
	 * the SPI bus speed is high enough to fill the SPI Rx FIFO before it's
	 * cleared by the Rx DMA channel. In order to fix the problem the Tx
	 * DMA activity is intentionally slowed down by limiting the SPI Tx
	 * FIFO depth to a value twice as big as the Tx burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}
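
/*
 * Illustrative numbers (not from the original sources): with a 16-entry
 * FIFO, def_burst = 8. If both channels report max_burst >= 8, then
 * rxburst = txburst = 8, DW_SPI_DMARDLR is programmed with 7 (Rx DMA
 * request once 8 or more entries are queued) and DW_SPI_DMATDLR with 8
 * (Tx DMA request only once the FIFO has drained to half its depth).
 */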

static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx = {0}, rx = {0};

	dma_get_slave_caps(dws->txchan, &tx);
	dma_get_slave_caps(dws->rxchan, &rx);

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;
}
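
/*
 * A dma_sg_burst of zero is treated by dw_spi_dma_transfer() below as "no
 * SG-burst limit": both SG lists are handed to the DMA engine at once. A
 * non-zero value caps how many SG entries may be submitted that way before
 * the driver falls back to the one-by-one feeding workaround.
 */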

static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the DMA controller of Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	pci_dev_put(dma_dev);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	pci_dev_put(dma_dev);

	return -EBUSY;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	int ret;

	dws->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dws->rxchan)) {
		ret = PTR_ERR(dws->rxchan);
		dws->rxchan = NULL;
		goto err_exit;
	}

	dws->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dws->txchan)) {
		ret = PTR_ERR(dws->txchan);
		dws->txchan = NULL;
		goto free_rxchan;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return ret;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}
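
/*
 * DMA is only worth the channel setup and mapping overhead when a transfer
 * can't fit entirely into the controller FIFO; shorter transfers are served
 * by the core driver's PIO/IRQ paths instead.
 */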
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
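
/*
 * Example with illustrative numbers: a 100 KiB transfer at 1 MHz spends
 * 102400 * 8 / 1000000 s ~= 819 ms on the wire, so dw_spi_dma_wait()
 * above arms the completion timeout with 2 * 819 + 200 = 1838 ms: twice
 * the nominal transfer time plus fixed slack for scheduling and DMA
 * engine latencies.
 */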

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Tx
 * channel's callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still doing the data fetching,
	 * but if it is, let's give it some reasonable time. The timeout
	 * calculation is based on the synchronous APB/SSI reference clock
	 * rate, on the number of data entries left in the Rx FIFO, times the
	 * number of clock periods normally needed for a single APB read/write
	 * transaction without the PREADY signal utilized (which is true for
	 * the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}
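
/*
 * Illustrative numbers: with max_freq = 100 MHz and 32 entries left in the
 * Rx FIFO, the per-retry delay computed above is 4 * 10 ns * 32 = 1280 ns,
 * which exceeds NSEC_PER_USEC and is therefore rounded up to a 2 us delay.
 */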

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Rx
 * channel's callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = DW_SPI_DMACR_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= DW_SPI_DMACR_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = DW_SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
	dw_spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}
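
/*
 * Note that only the FIFO overflow/underflow interrupts are unmasked here:
 * normal completion is signalled through the DMA callbacks, so a SPI
 * interrupt during a DMA-based transfer indicates an error, which
 * dw_spi_dma_transfer_handler() records before waking up the waiter.
 */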

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/*
		 * Rx must be started before Tx: Rx data starts arriving
		 * as soon as Tx begins clocking the bus.
		 */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * In case at least one of the requested DMA channels doesn't support the
 * hardware accelerated SG list entries traverse, the DMA driver will most
 * likely work that around by performing the IRQ-based SG list entries
 * resubmission. That might and will cause a problem if the DMA Tx channel is
 * recharged and re-executed before the Rx DMA channel. Due to
 * non-deterministic IRQ-handler execution latency the DMA Tx channel will
 * start pushing data to the SPI bus before the Rx DMA channel is even
 * reinitialized with the next inbound SG list entry. By doing so the DMA Tx
 * channel will implicitly start filling the DW APB SSI Rx FIFO up, which
 * while the DMA Rx channel is being recharged and re-executed will
 * eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have a different number of entries of different lengths (though
 * the total length should match) let's virtually split the SG lists into a
 * set of DMA transfers, each one as long as the minimum of the ordered
 * SG-entry lengths. An ASCII sketch of the implemented algorithm follows:
 *
 * xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note in order to have this workaround solving the denoted problem the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter with the maximum data block
 * size the DMA engine supports.
 */
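
/*
 * A worked example with illustrative lengths: for tx_sg entries of
 * {6, 8, 2} bytes and rx_sg entries of {2, 8, 6} bytes (16 bytes in total
 * on both sides), the loop below issues five transfers of min()-sized
 * chunks, {2, 4, 4, 4, 2}, advancing whichever SG entry is exhausted.
 */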
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit the DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit the DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/*
		 * Rx must be started before Tx: Rx data starts arriving
		 * as soon as Tx begins clocking the bus.
		 */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished since the SPI controller is kept enabled during
		 * the procedure this loop implements and there is no risk
		 * of losing data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute normal DMA-based transfer (which submits the Rx and Tx SG
	 * lists directly to the DMA engine at once) if either full hardware
	 * accelerated SG list traverse is supported by both channels, or the
	 * Tx-only SPI transfer is requested, or the DMA engine is capable of
	 * handling both SG lists on a hardware accelerated basis.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	}

	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);
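
/*
 * Usage sketch (illustrative, not part of this file): a glue driver is
 * expected to pick a DMA implementation before registering the controller,
 * after which the core invokes the ops, roughly as in:
 *
 *	dw_spi_dma_setup_generic(dws);		// or dw_spi_dma_setup_mfld()
 *	ret = dw_spi_add_host(dev, dws);	// core calls dws->dma_ops->dma_init(), etc.
 */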