drivers/spi/spi-dw-dma.c (GNU Linux-libre 5.10.215-gnu1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define RX_BUSY         0
#define RX_BURST_LEVEL  16
#define TX_BUSY         1
#define TX_BURST_LEVEL  16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma_slave *s = param;

        if (s->dma_dev != chan->device->dev)
                return false;

        chan->private = s;
        return true;
}

static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
        struct dma_slave_caps caps;
        u32 max_burst, def_burst;
        int ret;

        def_burst = dws->fifo_len / 2;

        ret = dma_get_slave_caps(dws->rxchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = RX_BURST_LEVEL;

        dws->rxburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

        ret = dma_get_slave_caps(dws->txchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = TX_BURST_LEVEL;

        /*
         * Servicing the Rx DMA channel at a higher priority than the Tx DMA
         * channel may not be enough to keep a DMA-based SPI transfer well
         * balanced. There may still be moments when the Tx DMA channel is
         * handled faster than the Rx DMA channel, which will eventually
         * overflow the SPI Rx FIFO if the bus speed is high enough to fill
         * it before the Rx DMA channel drains it. To fix the problem, Tx DMA
         * activity is intentionally slowed down by limiting the effective
         * SPI Tx FIFO depth to twice the Tx burst length.
         */
        dws->txburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}
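
/*
 * Worked example with hypothetical numbers (not taken from real hardware):
 * for fifo_len = 32 and no tighter DMA capability, both bursts become
 * min(16, 16) = 16. DMARDLR = 15 then raises an Rx DMA request once at
 * least 16 entries sit in the Rx FIFO, while DMATDLR = 16 raises a Tx DMA
 * request once the Tx FIFO drops to 16 entries or fewer, capping the
 * in-flight Tx FIFO depth at roughly 2 * txburst = 32 entries.
 */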

static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
        struct dma_slave_caps tx = {0}, rx = {0};

        dma_get_slave_caps(dws->txchan, &tx);
        dma_get_slave_caps(dws->rxchan, &rx);

        if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
                dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
        else if (tx.max_sg_burst > 0)
                dws->dma_sg_burst = tx.max_sg_burst;
        else if (rx.max_sg_burst > 0)
                dws->dma_sg_burst = rx.max_sg_burst;
        else
                dws->dma_sg_burst = 0;
}
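
/*
 * Note: dma_sg_burst == 0 is taken further down in dw_spi_dma_transfer()
 * to mean "no SG burst limit reported", in which case the full SG lists
 * are always handed to the DMA engine at once.
 */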

static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
        struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
        struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
        struct pci_dev *dma_dev;
        dma_cap_mask_t mask;

        /*
         * Get the PCI device of the DMA controller. Currently it can only
         * be the Medfield DMA controller.
         */
        dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
        if (!dma_dev)
                return -ENODEV;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* 1. Init rx channel */
        rx->dma_dev = &dma_dev->dev;
        dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
        if (!dws->rxchan)
                goto err_exit;

        /* 2. Init tx channel */
        tx->dma_dev = &dma_dev->dev;
        dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
        if (!dws->txchan)
                goto free_rxchan;

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        dw_spi_dma_sg_burst_init(dws);

        pci_dev_put(dma_dev);

        return 0;

free_rxchan:
        dma_release_channel(dws->rxchan);
        dws->rxchan = NULL;
err_exit:
        pci_dev_put(dma_dev);
        return -EBUSY;
}
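
/*
 * The filter-based dma_request_channel() calls above are needed because
 * the Medfield DMA controller is a plain PCI device with no DT/ACPI
 * channel mapping: dw_spi_dma_chan_filter() matches candidate channels by
 * their DMA device and hands the dw_dma_slave data over via chan->private.
 */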

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
        dws->rxchan = dma_request_slave_channel(dev, "rx");
        if (!dws->rxchan)
                return -ENODEV;

        dws->txchan = dma_request_slave_channel(dev, "tx");
        if (!dws->txchan) {
                dma_release_channel(dws->rxchan);
                dws->rxchan = NULL;
                return -ENODEV;
        }

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        dw_spi_dma_sg_burst_init(dws);

        return 0;
}
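
/*
 * Note: dma_request_slave_channel() returns NULL on any failure, so a
 * missing channel surfaces as -ENODEV here rather than, say,
 * -EPROBE_DEFER. The core then keeps running the transfers in PIO mode
 * (dw_spi_add_host() only warns when dma_init() fails).
 */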

static void dw_spi_dma_exit(struct dw_spi *dws)
{
        if (dws->txchan) {
                dmaengine_terminate_sync(dws->txchan);
                dma_release_channel(dws->txchan);
        }

        if (dws->rxchan) {
                dmaengine_terminate_sync(dws->rxchan);
                dma_release_channel(dws->rxchan);
        }
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
        dw_spi_check_status(dws, false);

        complete(&dws->dma_completion);

        return IRQ_HANDLED;
}
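
/*
 * This handler runs from the core driver's IRQ path for the fault
 * interrupts unmasked in dw_spi_dma_setup() (Tx overflow, Rx
 * over/underflow). dw_spi_check_status() records the error status, and
 * completing dma_completion lets dw_spi_dma_wait() return early instead
 * of timing out.
 */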

static bool dw_spi_can_dma(struct spi_controller *master,
                           struct spi_device *spi, struct spi_transfer *xfer)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);

        return xfer->len > dws->fifo_len;
}
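
/*
 * Transfers that fit entirely into the SPI FIFO are left to the PIO path,
 * which is typically cheaper than setting up and tearing down a pair of
 * DMA descriptors.
 */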

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
        if (n_bytes == 1)
                return DMA_SLAVE_BUSWIDTH_1_BYTE;
        else if (n_bytes == 2)
                return DMA_SLAVE_BUSWIDTH_2_BYTES;

        return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
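
/*
 * Only the 1- and 2-byte frame sizes used by this driver are mapped;
 * anything else yields DMA_SLAVE_BUSWIDTH_UNDEFINED, which the DMA engine
 * driver is expected to reject in dmaengine_slave_config().
 */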

static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
        unsigned long long ms;

        ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
        do_div(ms, speed);
        ms += ms + 200;

        if (ms > UINT_MAX)
                ms = UINT_MAX;

        ms = wait_for_completion_timeout(&dws->dma_completion,
                                         msecs_to_jiffies(ms));

        if (ms == 0) {
                dev_err(&dws->master->cur_msg->spi->dev,
                        "DMA transaction timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}
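
/*
 * Worked example with hypothetical numbers: len = 4096 bytes at speed =
 * 1 MHz gives 4096 * 1000 * 8 / 1000000 = 32 ms of wire time; doubling it
 * and adding the 200 ms margin yields a 264 ms completion timeout.
 */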

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
        return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int retry = SPI_WAIT_RETRIES;
        struct spi_delay delay;
        u32 nents;

        nents = dw_readl(dws, DW_SPI_TXFLR);
        delay.unit = SPI_DELAY_UNIT_SCK;
        delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

        while (dw_spi_dma_tx_busy(dws) && retry--)
                spi_delay_exec(&delay, xfer);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Tx hung up\n");
                return -EIO;
        }

        return 0;
}
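
/*
 * Worked example with a hypothetical FIFO snapshot: 8 entries left and
 * n_bytes = 2 make each retry sleep 8 * 2 * 8 = 128 SCLK periods, roughly
 * the time needed for the shift logic to drain the remaining data.
 */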

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(TX_BUSY, &dws->dma_chan_busy);
        if (test_bit(RX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
        struct dma_slave_config txconf;

        memset(&txconf, 0, sizeof(txconf));
        txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = dws->txburst;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        txconf.device_fc = false;

        return dmaengine_slave_config(dws->txchan, &txconf);
}

static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *txdesc;
        dma_cookie_t cookie;
        int ret;

        txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
                                         DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
                return -ENOMEM;

        txdesc->callback = dw_spi_dma_tx_done;
        txdesc->callback_param = dws;

        cookie = dmaengine_submit(txdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->txchan);
                return ret;
        }

        set_bit(TX_BUSY, &dws->dma_chan_busy);

        return 0;
}
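
/*
 * DMA_PREP_INTERRUPT asks for the completion callback installed above to
 * be invoked when the descriptor finishes, while DMA_CTRL_ACK marks the
 * descriptor as immediately reusable/freeable by the DMA engine driver.
 */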

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
        return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
        int retry = SPI_WAIT_RETRIES;
        struct spi_delay delay;
        unsigned long ns, us;
        u32 nents;

        /*
         * It's unlikely that the DMA engine is still fetching data, but if
         * it is, give it some reasonable time. The timeout is calculated
         * from the synchronous APB/SSI reference clock rate, the number of
         * data entries left in the Rx FIFO, and the number of clock periods
         * normally needed for a single APB read/write transaction without
         * the PREADY signal utilized (which is true for the DW APB SSI
         * controller).
         */
        nents = dw_readl(dws, DW_SPI_RXFLR);
        ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
        if (ns <= NSEC_PER_USEC) {
                delay.unit = SPI_DELAY_UNIT_NSECS;
                delay.value = ns;
        } else {
                us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
                delay.unit = SPI_DELAY_UNIT_USECS;
                delay.value = clamp_val(us, 0, USHRT_MAX);
        }

        while (dw_spi_dma_rx_busy(dws) && retry--)
                spi_delay_exec(&delay, NULL);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Rx hung up\n");
                return -EIO;
        }

        return 0;
}
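
/*
 * Worked example with hypothetical numbers: max_freq = 100 MHz and 32
 * entries left in the Rx FIFO give ns = 4 * 10^9 / 10^8 * 32 = 1280 ns,
 * which is above a microsecond and hence rounded up to a 2 us delay unit.
 */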

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(RX_BUSY, &dws->dma_chan_busy);
        if (test_bit(TX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
        struct dma_slave_config rxconf;

        memset(&rxconf, 0, sizeof(rxconf));
        rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = dws->rxburst;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        rxconf.device_fc = false;

        return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *rxdesc;
        dma_cookie_t cookie;
        int ret;

        rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
                return -ENOMEM;

        rxdesc->callback = dw_spi_dma_rx_done;
        rxdesc->callback_param = dws;

        cookie = dmaengine_submit(rxdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->rxchan);
                return ret;
        }

        set_bit(RX_BUSY, &dws->dma_chan_busy);

        return 0;
}

static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
        u16 imr, dma_ctrl;
        int ret;

        if (!xfer->tx_buf)
                return -EINVAL;

        /* Setup DMA channels */
        ret = dw_spi_dma_config_tx(dws);
        if (ret)
                return ret;

        if (xfer->rx_buf) {
                ret = dw_spi_dma_config_rx(dws);
                if (ret)
                        return ret;
        }

        /* Set the DMA handshaking interface */
        dma_ctrl = SPI_DMA_TDMAE;
        if (xfer->rx_buf)
                dma_ctrl |= SPI_DMA_RDMAE;
        dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

        /* Set the interrupt mask */
        imr = SPI_INT_TXOI;
        if (xfer->rx_buf)
                imr |= SPI_INT_RXUI | SPI_INT_RXOI;
        spi_umask_intr(dws, imr);

        reinit_completion(&dws->dma_completion);

        dws->transfer_handler = dw_spi_dma_transfer_handler;

        return 0;
}
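
/*
 * Note: the core sets SPI_CONTROLLER_MUST_TX when DMA is enabled, so a
 * missing tx_buf should never actually occur here. The transfer is always
 * driven from the Tx side; Rx DMA, its handshake and its fault interrupts
 * are configured only when an inbound buffer is present.
 */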

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int ret;

        /* Submit the DMA Tx transfer */
        ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
        if (ret)
                goto err_clear_dmac;

        /* Submit the DMA Rx transfer if required */
        if (xfer->rx_buf) {
                ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
                                           xfer->rx_sg.nents);
                if (ret)
                        goto err_clear_dmac;

                /*
                 * Rx must be started before Tx, since inbound data shows
                 * up as soon as Tx starts driving the bus
                 */
                dma_async_issue_pending(dws->rxchan);
        }

        dma_async_issue_pending(dws->txchan);

        ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}
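
/*
 * DW_SPI_DMACR is cleared on both the success and the error path above, so
 * the controller stops raising DMA handshake requests once the transfer is
 * over or the channels have been torn down.
 */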

/*
 * If at least one of the requested DMA channels doesn't support hardware
 * accelerated traversal of the SG list entries, the DMA driver will most
 * likely work around that by resubmitting the SG list entries from its IRQ
 * handler. That can and will cause a problem if the DMA Tx channel is
 * recharged and re-executed before the Rx DMA channel. Due to the
 * non-deterministic IRQ-handler execution latency, the DMA Tx channel will
 * start pushing data to the SPI bus before the Rx DMA channel is even
 * reinitialized with the next inbound SG list entry. By doing so the DMA Tx
 * channel will implicitly start filling the DW APB SSI Rx FIFO, which will
 * eventually overflow while the DMA Rx channel is being recharged and
 * re-executed.
 *
 * To solve the problem we have to feed the DMA engine with the SG list
 * entries one by one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have different numbers of entries of different lengths (though
 * the total length must match), let's virtually split the SG lists into a
 * set of DMA transfers, each with a length equal to the minimum of the
 * ordered SG-entry lengths. An ASCII sketch of the implemented algorithm:
 *                  xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note that for this workaround to solve the denoted problem, the DMA
 * engine driver must properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter to the maximum data block
 * size the DMA engine supports.
 */
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
        unsigned int tx_len = 0, rx_len = 0;
        unsigned int base, len;
        int ret;

        sg_init_table(&tx_tmp, 1);
        sg_init_table(&rx_tmp, 1);

        for (base = 0, len = 0; base < xfer->len; base += len) {
                /* Fetch next Tx DMA data chunk */
                if (!tx_len) {
                        tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
                        sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
                        tx_len = sg_dma_len(tx_sg);
                }

                /* Fetch next Rx DMA data chunk */
                if (!rx_len) {
                        rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
                        sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
                        rx_len = sg_dma_len(rx_sg);
                }

                len = min(tx_len, rx_len);

                sg_dma_len(&tx_tmp) = len;
                sg_dma_len(&rx_tmp) = len;

                /* Submit DMA Tx transfer */
                ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
                if (ret)
                        break;

                /* Submit DMA Rx transfer */
                ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
                if (ret)
                        break;

                /* Rx must be started before Tx, same reasoning as above */
                dma_async_issue_pending(dws->rxchan);

                dma_async_issue_pending(dws->txchan);

                /*
                 * Here we only need to wait for the DMA transfer to
                 * finish. The SPI controller stays enabled throughout this
                 * loop, so there is no risk of losing data left in the
                 * Tx/Rx FIFOs.
                 */
                ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
                if (ret)
                        break;

                reinit_completion(&dws->dma_completion);

                sg_dma_address(&tx_tmp) += len;
                sg_dma_address(&rx_tmp) += len;
                tx_len -= len;
                rx_len -= len;
        }

        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}
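
/*
 * Worked example with hypothetical SG-entry sizes: tx_sg = {3, 4, 2} and
 * rx_sg = {1, 4, 4} (9 bytes total each) get split by the min() rule above
 * into five partial DMA transfers of {1, 2, 2, 2, 2} bytes.
 */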

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
        unsigned int nents;
        int ret;

        nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

        /*
         * Execute the normal DMA-based transfer, which submits the Rx and
         * Tx SG lists to the DMA engine at once, if a Tx-only SPI transfer
         * is requested, if no SG burst limit has been reported, or if both
         * SG lists fit within the DMA engine's hardware accelerated SG
         * traversal capability.
         */
        if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
                ret = dw_spi_dma_transfer_all(dws, xfer);
        else
                ret = dw_spi_dma_transfer_one(dws, xfer);
        if (ret)
                return ret;

        if (dws->master->cur_msg->status == -EINPROGRESS) {
                ret = dw_spi_dma_wait_tx_done(dws, xfer);
                if (ret)
                        return ret;
        }

        if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
                ret = dw_spi_dma_wait_rx_done(dws);

        return ret;
}
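
/*
 * Even after the DMA completion has fired, data may still sit in the SPI
 * FIFOs. The drain waits above make sure the bus transaction has really
 * finished before the core moves on, and they are skipped once an error
 * has been recorded in cur_msg->status.
 */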

static void dw_spi_dma_stop(struct dw_spi *dws)
{
        if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->txchan);
                clear_bit(TX_BUSY, &dws->dma_chan_busy);
        }
        if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->rxchan);
                clear_bit(RX_BUSY, &dws->dma_chan_busy);
        }
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
        .dma_init       = dw_spi_dma_init_mfld,
        .dma_exit       = dw_spi_dma_exit,
        .dma_setup      = dw_spi_dma_setup,
        .can_dma        = dw_spi_can_dma,
        .dma_transfer   = dw_spi_dma_transfer,
        .dma_stop       = dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
        .dma_init       = dw_spi_dma_init_generic,
        .dma_exit       = dw_spi_dma_exit,
        .dma_setup      = dw_spi_dma_setup,
        .can_dma        = dw_spi_can_dma,
        .dma_transfer   = dw_spi_dma_transfer,
        .dma_stop       = dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);
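
/*
 * Usage sketch with a hypothetical glue driver, not part of this file: a
 * platform or PCI glue layer is expected to install one of the exported
 * ops before registering the controller, roughly as follows. The probe
 * context and its variables are assumptions for illustration only.
 *
 *      static int my_dw_spi_probe(struct platform_device *pdev)
 *      {
 *              struct dw_spi *dws = ...;       // driver-private setup elided
 *
 *              dw_spi_dma_setup_generic(dws);  // installs dw_spi_dma_generic_ops
 *              return dw_spi_add_host(&pdev->dev, dws);  // dma_ops->dma_init() runs here
 *      }
 */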