/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"
#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define RX_BUSY		0
#define TX_BUSY		1
static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };
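/*
 * dmaengine channel filter: accept only channels that belong to the DMA
 * device found in mid_spi_dma_init() and pass the slave data to the
 * DMA driver via chan->private.
 */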
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}
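/*
 * Request the RX and TX channels from the Medfield DMA controller
 * (PCI device 8086:0827) and hook them up to the SPI core.
 */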
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct pci_dev *dma_dev;
	struct dw_dma_slave *tx = dws->dma_tx;
	struct dw_dma_slave *rx = dws->dma_rx;
	dma_cap_mask_t mask;

	/*
	 * Get pci device for DMA controller, currently it could only
	 * be the DMA controller of Medfield
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;
	dws->master->dma_rx = dws->rxchan;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;
	dws->master->dma_tx = dws->txchan;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}
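/* Quiesce and release both DMA channels; a no-op if DMA never came up. */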
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_sync(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_sync(dws->rxchan);
	dma_release_channel(dws->rxchan);
}
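/*
 * IRQ handler used while DMA is active. Only FIFO error interrupts are
 * unmasked in this mode, so any interrupt that arrives here means the
 * transfer has failed: clear the interrupts, reset the chip and finalize
 * the message with -EIO.
 */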
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
	return IRQ_HANDLED;
}
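/* Use DMA only for transfers that do not fit entirely in the FIFO. */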
static bool mid_spi_can_dma(struct spi_master *master, struct spi_device *spi,
		struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_master_get_devdata(master);

	if (!dws->dma_inited)
		return false;

	return xfer->len > dws->fifo_len;
}
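/* Map the controller's frame width in bytes to a dmaengine bus width. */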
static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
{
	if (dma_width == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (dma_width == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the tx channel clears the corresponding bit on completion.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}
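/*
 * Build the TX descriptor: configure the channel for mem-to-dev and wrap
 * the transfer's pre-mapped scatterlist in a slave_sg descriptor.
 */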
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->dma_width);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}
/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the rx channel clears the corresponding bit on completion.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}
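/*
 * Build the RX descriptor: the dev-to-mem mirror image of
 * dw_spi_dma_prepare_tx().
 */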
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->dma_width);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}
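/*
 * Program the FIFO thresholds that gate the controller's DMA handshake
 * (matching the maxburst of 16 used in the slave configs), enable DMA for
 * the directions in use and unmask only their FIFO error interrupts.
 */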
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf) {
		dma_ctrl |= SPI_DMA_TDMAE;
		imr |= SPI_INT_TXOI;
	}
	if (xfer->rx_buf) {
		dma_ctrl |= SPI_DMA_RDMAE;
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	}
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, imr);

	dws->transfer_handler = dma_transfer;

	return 0;
}
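/*
 * Submit both descriptors, RX first, so the receive channel is already
 * listening when TX starts clocking data out.
 */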
static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/* RX must be started before TX: SPI clocks data in while it clocks data out */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}
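/* Cancel in-flight DMA and clear the busy bits so a new transfer can start. */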
static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}
static const struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};
#endif	/* CONFIG_SPI_DW_MID_DMA */
/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8
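/*
 * Read this controller's clock divider from the MRST Clock Control Unit
 * and derive the maximum SPI clock: max_freq = 100 MHz / (cdiv + 1).
 */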
int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_tx = &mid_dma_tx;
	dws->dma_rx = &mid_dma_rx;
	dws->dma_ops = &mid_dma_ops;
#endif

	return 0;
}