GNU Linux-libre 4.9.318-gnu1
[releases.git] / drivers / dma / dw / core.c
1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  * Copyright (C) 2013 Intel Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/bitops.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26
27 #include "../dmaengine.h"
28 #include "internal.h"
29
30 /*
31  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
32  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
33  * of which use ARM any more).  See the "Databook" from Synopsys for
34  * information beyond what licensees probably provide.
35  *
36  * The driver has been tested with the Atmel AT32AP7000, which does not
37  * support descriptor writeback.
38  */
39
40 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
41                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
42                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43                 bool _is_slave = is_slave_direction(_dwc->direction);   \
44                 u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
45                         DW_DMA_MSIZE_16;                        \
46                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
47                         DW_DMA_MSIZE_16;                        \
48                 u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?         \
49                         _dwc->dws.p_master : _dwc->dws.m_master;        \
50                 u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?         \
51                         _dwc->dws.p_master : _dwc->dws.m_master;        \
52                                                                 \
53                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
54                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
55                  | DWC_CTLL_LLP_D_EN                            \
56                  | DWC_CTLL_LLP_S_EN                            \
57                  | DWC_CTLL_DMS(_dms)                           \
58                  | DWC_CTLL_SMS(_sms));                         \
59         })
60
61 /* The set of bus widths supported by the DMA controller */
62 #define DW_DMA_BUSWIDTHS                          \
63         BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)       | \
64         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          | \
65         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         | \
66         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
67
68 /*----------------------------------------------------------------------*/
69
70 static struct device *chan2dev(struct dma_chan *chan)
71 {
72         return &chan->dev->device;
73 }
74
75 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
76 {
77         return to_dw_desc(dwc->active_list.next);
78 }
79
80 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
81 {
82         struct dw_desc          *desc = txd_to_dw_desc(tx);
83         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
84         dma_cookie_t            cookie;
85         unsigned long           flags;
86
87         spin_lock_irqsave(&dwc->lock, flags);
88         cookie = dma_cookie_assign(tx);
89
90         /*
91          * REVISIT: We should attempt to chain as many descriptors as
92          * possible, perhaps even appending to those already submitted
93          * for DMA. But this is hard to do in a race-free manner.
94          */
95
96         list_add_tail(&desc->desc_node, &dwc->queue);
97         spin_unlock_irqrestore(&dwc->lock, flags);
98         dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
99                  __func__, desc->txd.cookie);
100
101         return cookie;
102 }
103
104 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
105 {
106         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
107         struct dw_desc *desc;
108         dma_addr_t phys;
109
110         desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
111         if (!desc)
112                 return NULL;
113
114         dwc->descs_allocated++;
115         INIT_LIST_HEAD(&desc->tx_list);
116         dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
117         desc->txd.tx_submit = dwc_tx_submit;
118         desc->txd.flags = DMA_CTRL_ACK;
119         desc->txd.phys = phys;
120         return desc;
121 }
122
123 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124 {
125         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
126         struct dw_desc *child, *_next;
127
128         if (unlikely(!desc))
129                 return;
130
131         list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
132                 list_del(&child->desc_node);
133                 dma_pool_free(dw->desc_pool, child, child->txd.phys);
134                 dwc->descs_allocated--;
135         }
136
137         dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
138         dwc->descs_allocated--;
139 }
140
141 static void dwc_initialize(struct dw_dma_chan *dwc)
142 {
143         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
144         u32 cfghi = DWC_CFGH_FIFO_MODE;
145         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
146         bool hs_polarity = dwc->dws.hs_polarity;
147
148         if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
149                 return;
150
151         cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
152         cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
153
154         /* Set polarity of handshake interface */
155         cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
156
157         channel_writel(dwc, CFG_LO, cfglo);
158         channel_writel(dwc, CFG_HI, cfghi);
159
160         /* Enable interrupts */
161         channel_set_bit(dw, MASK.XFER, dwc->mask);
162         channel_set_bit(dw, MASK.ERROR, dwc->mask);
163
164         set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
165 }
166
167 /*----------------------------------------------------------------------*/
168
169 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
170 {
171         dev_err(chan2dev(&dwc->chan),
172                 "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
173                 channel_readl(dwc, SAR),
174                 channel_readl(dwc, DAR),
175                 channel_readl(dwc, LLP),
176                 channel_readl(dwc, CTL_HI),
177                 channel_readl(dwc, CTL_LO));
178 }
179
180 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
181 {
182         channel_clear_bit(dw, CH_EN, dwc->mask);
183         while (dma_readl(dw, CH_EN) & dwc->mask)
184                 cpu_relax();
185 }
186
187 /*----------------------------------------------------------------------*/
188
189 /* Perform single block transfer */
190 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
191                                        struct dw_desc *desc)
192 {
193         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
194         u32             ctllo;
195
196         /*
197          * Software emulation of LLP mode relies on interrupts to continue
198          * multi block transfer.
199          */
200         ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
201
202         channel_writel(dwc, SAR, lli_read(desc, sar));
203         channel_writel(dwc, DAR, lli_read(desc, dar));
204         channel_writel(dwc, CTL_LO, ctllo);
205         channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
206         channel_set_bit(dw, CH_EN, dwc->mask);
207
208         /* Move pointer to next descriptor */
209         dwc->tx_node_active = dwc->tx_node_active->next;
210 }
211
212 /* Called with dwc->lock held and bh disabled */
213 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
214 {
215         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
216         u8              lms = DWC_LLP_LMS(dwc->dws.m_master);
217         unsigned long   was_soft_llp;
218
219         /* ASSERT:  channel is idle */
220         if (dma_readl(dw, CH_EN) & dwc->mask) {
221                 dev_err(chan2dev(&dwc->chan),
222                         "%s: BUG: Attempted to start non-idle channel\n",
223                         __func__);
224                 dwc_dump_chan_regs(dwc);
225
226                 /* The tasklet will hopefully advance the queue... */
227                 return;
228         }
229
230         if (dwc->nollp) {
231                 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
232                                                 &dwc->flags);
233                 if (was_soft_llp) {
234                         dev_err(chan2dev(&dwc->chan),
235                                 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
236                         return;
237                 }
238
239                 dwc_initialize(dwc);
240
241                 first->residue = first->total_len;
242                 dwc->tx_node_active = &first->tx_list;
243
244                 /* Submit first block */
245                 dwc_do_single_block(dwc, first);
246
247                 return;
248         }
249
250         dwc_initialize(dwc);
251
252         channel_writel(dwc, LLP, first->txd.phys | lms);
253         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
254         channel_writel(dwc, CTL_HI, 0);
255         channel_set_bit(dw, CH_EN, dwc->mask);
256 }
257
258 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
259 {
260         struct dw_desc *desc;
261
262         if (list_empty(&dwc->queue))
263                 return;
264
265         list_move(dwc->queue.next, &dwc->active_list);
266         desc = dwc_first_active(dwc);
267         dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
268         dwc_dostart(dwc, desc);
269 }
270
271 /*----------------------------------------------------------------------*/
272
273 static void
274 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
275                 bool callback_required)
276 {
277         struct dma_async_tx_descriptor  *txd = &desc->txd;
278         struct dw_desc                  *child;
279         unsigned long                   flags;
280         struct dmaengine_desc_callback  cb;
281
282         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
283
284         spin_lock_irqsave(&dwc->lock, flags);
285         dma_cookie_complete(txd);
286         if (callback_required)
287                 dmaengine_desc_get_callback(txd, &cb);
288         else
289                 memset(&cb, 0, sizeof(cb));
290
291         /* async_tx_ack */
292         list_for_each_entry(child, &desc->tx_list, desc_node)
293                 async_tx_ack(&child->txd);
294         async_tx_ack(&desc->txd);
295         dwc_desc_put(dwc, desc);
296         spin_unlock_irqrestore(&dwc->lock, flags);
297
298         dmaengine_desc_callback_invoke(&cb, NULL);
299 }
300
301 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
302 {
303         struct dw_desc *desc, *_desc;
304         LIST_HEAD(list);
305         unsigned long flags;
306
307         spin_lock_irqsave(&dwc->lock, flags);
308         if (dma_readl(dw, CH_EN) & dwc->mask) {
309                 dev_err(chan2dev(&dwc->chan),
310                         "BUG: XFER bit set, but channel not idle!\n");
311
312                 /* Try to continue after resetting the channel... */
313                 dwc_chan_disable(dw, dwc);
314         }
315
316         /*
317          * Submit queued descriptors ASAP, i.e. before we go through
318          * the completed ones.
319          */
320         list_splice_init(&dwc->active_list, &list);
321         dwc_dostart_first_queued(dwc);
322
323         spin_unlock_irqrestore(&dwc->lock, flags);
324
325         list_for_each_entry_safe(desc, _desc, &list, desc_node)
326                 dwc_descriptor_complete(dwc, desc, true);
327 }
328
329 /* Returns how many bytes were already received from source */
330 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
331 {
332         u32 ctlhi = channel_readl(dwc, CTL_HI);
333         u32 ctllo = channel_readl(dwc, CTL_LO);
334
335         return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
336 }
337
338 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
339 {
340         dma_addr_t llp;
341         struct dw_desc *desc, *_desc;
342         struct dw_desc *child;
343         u32 status_xfer;
344         unsigned long flags;
345
346         spin_lock_irqsave(&dwc->lock, flags);
347         llp = channel_readl(dwc, LLP);
348         status_xfer = dma_readl(dw, RAW.XFER);
349
350         if (status_xfer & dwc->mask) {
351                 /* Everything we've submitted is done */
352                 dma_writel(dw, CLEAR.XFER, dwc->mask);
353
354                 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
355                         struct list_head *head, *active = dwc->tx_node_active;
356
357                         /*
358                          * We are inside first active descriptor.
359                          * Otherwise something is really wrong.
360                          */
361                         desc = dwc_first_active(dwc);
362
363                         head = &desc->tx_list;
364                         if (active != head) {
365                                 /* Update residue to reflect last sent descriptor */
366                                 if (active == head->next)
367                                         desc->residue -= desc->len;
368                                 else
369                                         desc->residue -= to_dw_desc(active->prev)->len;
370
371                                 child = to_dw_desc(active);
372
373                                 /* Submit next block */
374                                 dwc_do_single_block(dwc, child);
375
376                                 spin_unlock_irqrestore(&dwc->lock, flags);
377                                 return;
378                         }
379
380                         /* We are done here */
381                         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
382                 }
383
384                 spin_unlock_irqrestore(&dwc->lock, flags);
385
386                 dwc_complete_all(dw, dwc);
387                 return;
388         }
389
390         if (list_empty(&dwc->active_list)) {
391                 spin_unlock_irqrestore(&dwc->lock, flags);
392                 return;
393         }
394
395         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
396                 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
397                 spin_unlock_irqrestore(&dwc->lock, flags);
398                 return;
399         }
400
401         dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
402
403         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
404                 /* Initial residue value */
405                 desc->residue = desc->total_len;
406
407                 /* Check first descriptors addr */
408                 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
409                         spin_unlock_irqrestore(&dwc->lock, flags);
410                         return;
411                 }
412
413                 /* Check first descriptors llp */
414                 if (lli_read(desc, llp) == llp) {
415                         /* This one is currently in progress */
416                         desc->residue -= dwc_get_sent(dwc);
417                         spin_unlock_irqrestore(&dwc->lock, flags);
418                         return;
419                 }
420
421                 desc->residue -= desc->len;
422                 list_for_each_entry(child, &desc->tx_list, desc_node) {
423                         if (lli_read(child, llp) == llp) {
424                                 /* Currently in progress */
425                                 desc->residue -= dwc_get_sent(dwc);
426                                 spin_unlock_irqrestore(&dwc->lock, flags);
427                                 return;
428                         }
429                         desc->residue -= child->len;
430                 }
431
432                 /*
433                  * No descriptors so far seem to be in progress, i.e.
434                  * this one must be done.
435                  */
436                 spin_unlock_irqrestore(&dwc->lock, flags);
437                 dwc_descriptor_complete(dwc, desc, true);
438                 spin_lock_irqsave(&dwc->lock, flags);
439         }
440
441         dev_err(chan2dev(&dwc->chan),
442                 "BUG: All descriptors done, but channel not idle!\n");
443
444         /* Try to continue after resetting the channel... */
445         dwc_chan_disable(dw, dwc);
446
447         dwc_dostart_first_queued(dwc);
448         spin_unlock_irqrestore(&dwc->lock, flags);
449 }
450
451 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
452 {
453         dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
454                  lli_read(desc, sar),
455                  lli_read(desc, dar),
456                  lli_read(desc, llp),
457                  lli_read(desc, ctlhi),
458                  lli_read(desc, ctllo));
459 }
460
461 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
462 {
463         struct dw_desc *bad_desc;
464         struct dw_desc *child;
465         unsigned long flags;
466
467         dwc_scan_descriptors(dw, dwc);
468
469         spin_lock_irqsave(&dwc->lock, flags);
470
471         /*
472          * The descriptor currently at the head of the active list is
473          * borked. Since we don't have any way to report errors, we'll
474          * just have to scream loudly and try to carry on.
475          */
476         bad_desc = dwc_first_active(dwc);
477         list_del_init(&bad_desc->desc_node);
478         list_move(dwc->queue.next, dwc->active_list.prev);
479
480         /* Clear the error flag and try to restart the controller */
481         dma_writel(dw, CLEAR.ERROR, dwc->mask);
482         if (!list_empty(&dwc->active_list))
483                 dwc_dostart(dwc, dwc_first_active(dwc));
484
485         /*
486          * WARN may seem harsh, but since this only happens
487          * when someone submits a bad physical address in a
488          * descriptor, we should consider ourselves lucky that the
489          * controller flagged an error instead of scribbling over
490          * random memory locations.
491          */
492         dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
493                                        "  cookie: %d\n", bad_desc->txd.cookie);
494         dwc_dump_lli(dwc, bad_desc);
495         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
496                 dwc_dump_lli(dwc, child);
497
498         spin_unlock_irqrestore(&dwc->lock, flags);
499
500         /* Pretend the descriptor completed successfully */
501         dwc_descriptor_complete(dwc, bad_desc, true);
502 }
503
504 /* --------------------- Cyclic DMA API extensions -------------------- */
505
506 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
507 {
508         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
509         return channel_readl(dwc, SAR);
510 }
511 EXPORT_SYMBOL(dw_dma_get_src_addr);
512
513 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
514 {
515         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
516         return channel_readl(dwc, DAR);
517 }
518 EXPORT_SYMBOL(dw_dma_get_dst_addr);
519
520 /* Called with dwc->lock held and all DMAC interrupts disabled */
521 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
522                 u32 status_block, u32 status_err, u32 status_xfer)
523 {
524         unsigned long flags;
525
526         if (status_block & dwc->mask) {
527                 void (*callback)(void *param);
528                 void *callback_param;
529
530                 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
531                                 channel_readl(dwc, LLP));
532                 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
533
534                 callback = dwc->cdesc->period_callback;
535                 callback_param = dwc->cdesc->period_callback_param;
536
537                 if (callback)
538                         callback(callback_param);
539         }
540
541         /*
542          * Error and transfer complete are highly unlikely, and will most
543          * likely be due to a configuration error by the user.
544          */
545         if (unlikely(status_err & dwc->mask) ||
546                         unlikely(status_xfer & dwc->mask)) {
547                 unsigned int i;
548
549                 dev_err(chan2dev(&dwc->chan),
550                         "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
551                         status_xfer ? "xfer" : "error");
552
553                 spin_lock_irqsave(&dwc->lock, flags);
554
555                 dwc_dump_chan_regs(dwc);
556
557                 dwc_chan_disable(dw, dwc);
558
559                 /* Make sure DMA does not restart by loading a new list */
560                 channel_writel(dwc, LLP, 0);
561                 channel_writel(dwc, CTL_LO, 0);
562                 channel_writel(dwc, CTL_HI, 0);
563
564                 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
565                 dma_writel(dw, CLEAR.ERROR, dwc->mask);
566                 dma_writel(dw, CLEAR.XFER, dwc->mask);
567
568                 for (i = 0; i < dwc->cdesc->periods; i++)
569                         dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
570
571                 spin_unlock_irqrestore(&dwc->lock, flags);
572         }
573
574         /* Re-enable interrupts */
575         channel_set_bit(dw, MASK.BLOCK, dwc->mask);
576 }
577
578 /* ------------------------------------------------------------------------- */
579
580 static void dw_dma_tasklet(unsigned long data)
581 {
582         struct dw_dma *dw = (struct dw_dma *)data;
583         struct dw_dma_chan *dwc;
584         u32 status_block;
585         u32 status_xfer;
586         u32 status_err;
587         unsigned int i;
588
589         status_block = dma_readl(dw, RAW.BLOCK);
590         status_xfer = dma_readl(dw, RAW.XFER);
591         status_err = dma_readl(dw, RAW.ERROR);
592
593         dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
594
595         for (i = 0; i < dw->dma.chancnt; i++) {
596                 dwc = &dw->chan[i];
597                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
598                         dwc_handle_cyclic(dw, dwc, status_block, status_err,
599                                         status_xfer);
600                 else if (status_err & (1 << i))
601                         dwc_handle_error(dw, dwc);
602                 else if (status_xfer & (1 << i))
603                         dwc_scan_descriptors(dw, dwc);
604         }
605
606         /* Re-enable interrupts */
607         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
608         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
609 }
610
611 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
612 {
613         struct dw_dma *dw = dev_id;
614         u32 status;
615
616         /* Check if we have any interrupt from the DMAC which is not in use */
617         if (!dw->in_use)
618                 return IRQ_NONE;
619
620         status = dma_readl(dw, STATUS_INT);
621         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
622
623         /* Check if we have any interrupt from the DMAC */
624         if (!status)
625                 return IRQ_NONE;
626
627         /*
628          * Just disable the interrupts. We'll turn them back on in the
629          * softirq handler.
630          */
631         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
632         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
633         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
634
635         status = dma_readl(dw, STATUS_INT);
636         if (status) {
637                 dev_err(dw->dma.dev,
638                         "BUG: Unexpected interrupts pending: 0x%x\n",
639                         status);
640
641                 /* Try to recover */
642                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
643                 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
644                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
645                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
646                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
647         }
648
649         tasklet_schedule(&dw->tasklet);
650
651         return IRQ_HANDLED;
652 }
653
654 /*----------------------------------------------------------------------*/
655
656 static struct dma_async_tx_descriptor *
657 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
658                 size_t len, unsigned long flags)
659 {
660         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
661         struct dw_dma           *dw = to_dw_dma(chan->device);
662         struct dw_desc          *desc;
663         struct dw_desc          *first;
664         struct dw_desc          *prev;
665         size_t                  xfer_count;
666         size_t                  offset;
667         u8                      m_master = dwc->dws.m_master;
668         unsigned int            src_width;
669         unsigned int            dst_width;
670         unsigned int            data_width = dw->pdata->data_width[m_master];
671         u32                     ctllo;
672         u8                      lms = DWC_LLP_LMS(m_master);
673
674         dev_vdbg(chan2dev(chan),
675                         "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
676                         &dest, &src, len, flags);
677
678         if (unlikely(!len)) {
679                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
680                 return NULL;
681         }
682
683         dwc->direction = DMA_MEM_TO_MEM;
684
685         src_width = dst_width = __ffs(data_width | src | dest | len);
686
687         ctllo = DWC_DEFAULT_CTLLO(chan)
688                         | DWC_CTLL_DST_WIDTH(dst_width)
689                         | DWC_CTLL_SRC_WIDTH(src_width)
690                         | DWC_CTLL_DST_INC
691                         | DWC_CTLL_SRC_INC
692                         | DWC_CTLL_FC_M2M;
693         prev = first = NULL;
694
695         for (offset = 0; offset < len; offset += xfer_count << src_width) {
696                 xfer_count = min_t(size_t, (len - offset) >> src_width,
697                                            dwc->block_size);
698
699                 desc = dwc_desc_get(dwc);
700                 if (!desc)
701                         goto err_desc_get;
702
703                 lli_write(desc, sar, src + offset);
704                 lli_write(desc, dar, dest + offset);
705                 lli_write(desc, ctllo, ctllo);
706                 lli_write(desc, ctlhi, xfer_count);
707                 desc->len = xfer_count << src_width;
708
709                 if (!first) {
710                         first = desc;
711                 } else {
712                         lli_write(prev, llp, desc->txd.phys | lms);
713                         list_add_tail(&desc->desc_node, &first->tx_list);
714                 }
715                 prev = desc;
716         }
717
718         if (flags & DMA_PREP_INTERRUPT)
719                 /* Trigger interrupt after last block */
720                 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
721
722         prev->lli.llp = 0;
723         lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
724         first->txd.flags = flags;
725         first->total_len = len;
726
727         return &first->txd;
728
729 err_desc_get:
730         dwc_desc_put(dwc, first);
731         return NULL;
732 }
733
734 static struct dma_async_tx_descriptor *
735 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
736                 unsigned int sg_len, enum dma_transfer_direction direction,
737                 unsigned long flags, void *context)
738 {
739         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
740         struct dw_dma           *dw = to_dw_dma(chan->device);
741         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
742         struct dw_desc          *prev;
743         struct dw_desc          *first;
744         u32                     ctllo;
745         u8                      m_master = dwc->dws.m_master;
746         u8                      lms = DWC_LLP_LMS(m_master);
747         dma_addr_t              reg;
748         unsigned int            reg_width;
749         unsigned int            mem_width;
750         unsigned int            data_width = dw->pdata->data_width[m_master];
751         unsigned int            i;
752         struct scatterlist      *sg;
753         size_t                  total_len = 0;
754
755         dev_vdbg(chan2dev(chan), "%s\n", __func__);
756
757         if (unlikely(!is_slave_direction(direction) || !sg_len))
758                 return NULL;
759
760         dwc->direction = direction;
761
762         prev = first = NULL;
763
764         switch (direction) {
765         case DMA_MEM_TO_DEV:
766                 reg_width = __ffs(sconfig->dst_addr_width);
767                 reg = sconfig->dst_addr;
768                 ctllo = (DWC_DEFAULT_CTLLO(chan)
769                                 | DWC_CTLL_DST_WIDTH(reg_width)
770                                 | DWC_CTLL_DST_FIX
771                                 | DWC_CTLL_SRC_INC);
772
773                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
774                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
775
776                 for_each_sg(sgl, sg, sg_len, i) {
777                         struct dw_desc  *desc;
778                         u32             len, dlen, mem;
779
780                         mem = sg_dma_address(sg);
781                         len = sg_dma_len(sg);
782
783                         mem_width = __ffs(data_width | mem | len);
784
785 slave_sg_todev_fill_desc:
786                         desc = dwc_desc_get(dwc);
787                         if (!desc)
788                                 goto err_desc_get;
789
790                         lli_write(desc, sar, mem);
791                         lli_write(desc, dar, reg);
792                         lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
793                         if ((len >> mem_width) > dwc->block_size) {
794                                 dlen = dwc->block_size << mem_width;
795                                 mem += dlen;
796                                 len -= dlen;
797                         } else {
798                                 dlen = len;
799                                 len = 0;
800                         }
801
802                         lli_write(desc, ctlhi, dlen >> mem_width);
803                         desc->len = dlen;
804
805                         if (!first) {
806                                 first = desc;
807                         } else {
808                                 lli_write(prev, llp, desc->txd.phys | lms);
809                                 list_add_tail(&desc->desc_node, &first->tx_list);
810                         }
811                         prev = desc;
812                         total_len += dlen;
813
814                         if (len)
815                                 goto slave_sg_todev_fill_desc;
816                 }
817                 break;
818         case DMA_DEV_TO_MEM:
819                 reg_width = __ffs(sconfig->src_addr_width);
820                 reg = sconfig->src_addr;
821                 ctllo = (DWC_DEFAULT_CTLLO(chan)
822                                 | DWC_CTLL_SRC_WIDTH(reg_width)
823                                 | DWC_CTLL_DST_INC
824                                 | DWC_CTLL_SRC_FIX);
825
826                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
827                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
828
829                 for_each_sg(sgl, sg, sg_len, i) {
830                         struct dw_desc  *desc;
831                         u32             len, dlen, mem;
832
833                         mem = sg_dma_address(sg);
834                         len = sg_dma_len(sg);
835
836                         mem_width = __ffs(data_width | mem | len);
837
838 slave_sg_fromdev_fill_desc:
839                         desc = dwc_desc_get(dwc);
840                         if (!desc)
841                                 goto err_desc_get;
842
843                         lli_write(desc, sar, reg);
844                         lli_write(desc, dar, mem);
845                         lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
846                         if ((len >> reg_width) > dwc->block_size) {
847                                 dlen = dwc->block_size << reg_width;
848                                 mem += dlen;
849                                 len -= dlen;
850                         } else {
851                                 dlen = len;
852                                 len = 0;
853                         }
854                         lli_write(desc, ctlhi, dlen >> reg_width);
855                         desc->len = dlen;
856
857                         if (!first) {
858                                 first = desc;
859                         } else {
860                                 lli_write(prev, llp, desc->txd.phys | lms);
861                                 list_add_tail(&desc->desc_node, &first->tx_list);
862                         }
863                         prev = desc;
864                         total_len += dlen;
865
866                         if (len)
867                                 goto slave_sg_fromdev_fill_desc;
868                 }
869                 break;
870         default:
871                 return NULL;
872         }
873
874         if (flags & DMA_PREP_INTERRUPT)
875                 /* Trigger interrupt after last block */
876                 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
877
878         prev->lli.llp = 0;
879         lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
880         first->total_len = total_len;
881
882         return &first->txd;
883
884 err_desc_get:
885         dev_err(chan2dev(chan),
886                 "not enough descriptors available. Direction %d\n", direction);
887         dwc_desc_put(dwc, first);
888         return NULL;
889 }
890
891 bool dw_dma_filter(struct dma_chan *chan, void *param)
892 {
893         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
894         struct dw_dma_slave *dws = param;
895
896         if (dws->dma_dev != chan->device->dev)
897                 return false;
898
899         /* We have to copy data since dws can be temporary storage */
900         memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
901
902         return true;
903 }
904 EXPORT_SYMBOL_GPL(dw_dma_filter);
905
906 /*
907  * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
908  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
909  *
910  * NOTE: burst size 2 is not supported by controller.
911  *
912  * This can be done by finding least significant bit set: n & (n - 1)
913  */
914 static inline void convert_burst(u32 *maxburst)
915 {
916         if (*maxburst > 1)
917                 *maxburst = fls(*maxburst) - 2;
918         else
919                 *maxburst = 0;
920 }
921
922 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
923 {
924         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
925
926         /* Check if chan will be configured for slave transfers */
927         if (!is_slave_direction(sconfig->direction))
928                 return -EINVAL;
929
930         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
931         dwc->direction = sconfig->direction;
932
933         convert_burst(&dwc->dma_sconfig.src_maxburst);
934         convert_burst(&dwc->dma_sconfig.dst_maxburst);
935
936         return 0;
937 }
938
939 static int dwc_pause(struct dma_chan *chan)
940 {
941         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
942         unsigned long           flags;
943         unsigned int            count = 20;     /* timeout iterations */
944         u32                     cfglo;
945
946         spin_lock_irqsave(&dwc->lock, flags);
947
948         cfglo = channel_readl(dwc, CFG_LO);
949         channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
950         while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
951                 udelay(2);
952
953         set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
954
955         spin_unlock_irqrestore(&dwc->lock, flags);
956
957         return 0;
958 }
959
960 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
961 {
962         u32 cfglo = channel_readl(dwc, CFG_LO);
963
964         channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
965
966         clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
967 }
968
969 static int dwc_resume(struct dma_chan *chan)
970 {
971         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
972         unsigned long           flags;
973
974         spin_lock_irqsave(&dwc->lock, flags);
975
976         if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
977                 dwc_chan_resume(dwc);
978
979         spin_unlock_irqrestore(&dwc->lock, flags);
980
981         return 0;
982 }
983
984 static int dwc_terminate_all(struct dma_chan *chan)
985 {
986         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
987         struct dw_dma           *dw = to_dw_dma(chan->device);
988         struct dw_desc          *desc, *_desc;
989         unsigned long           flags;
990         LIST_HEAD(list);
991
992         spin_lock_irqsave(&dwc->lock, flags);
993
994         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
995
996         dwc_chan_disable(dw, dwc);
997
998         dwc_chan_resume(dwc);
999
1000         /* active_list entries will end up before queued entries */
1001         list_splice_init(&dwc->queue, &list);
1002         list_splice_init(&dwc->active_list, &list);
1003
1004         spin_unlock_irqrestore(&dwc->lock, flags);
1005
1006         /* Flush all pending and queued descriptors */
1007         list_for_each_entry_safe(desc, _desc, &list, desc_node)
1008                 dwc_descriptor_complete(dwc, desc, false);
1009
1010         return 0;
1011 }
1012
1013 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
1014 {
1015         struct dw_desc *desc;
1016
1017         list_for_each_entry(desc, &dwc->active_list, desc_node)
1018                 if (desc->txd.cookie == c)
1019                         return desc;
1020
1021         return NULL;
1022 }
1023
1024 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
1025 {
1026         struct dw_desc *desc;
1027         unsigned long flags;
1028         u32 residue;
1029
1030         spin_lock_irqsave(&dwc->lock, flags);
1031
1032         desc = dwc_find_desc(dwc, cookie);
1033         if (desc) {
1034                 if (desc == dwc_first_active(dwc)) {
1035                         residue = desc->residue;
1036                         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1037                                 residue -= dwc_get_sent(dwc);
1038                 } else {
1039                         residue = desc->total_len;
1040                 }
1041         } else {
1042                 residue = 0;
1043         }
1044
1045         spin_unlock_irqrestore(&dwc->lock, flags);
1046         return residue;
1047 }
1048
1049 static enum dma_status
1050 dwc_tx_status(struct dma_chan *chan,
1051               dma_cookie_t cookie,
1052               struct dma_tx_state *txstate)
1053 {
1054         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1055         enum dma_status         ret;
1056
1057         ret = dma_cookie_status(chan, cookie, txstate);
1058         if (ret == DMA_COMPLETE)
1059                 return ret;
1060
1061         dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1062
1063         ret = dma_cookie_status(chan, cookie, txstate);
1064         if (ret == DMA_COMPLETE)
1065                 return ret;
1066
1067         dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
1068
1069         if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
1070                 return DMA_PAUSED;
1071
1072         return ret;
1073 }
1074
1075 static void dwc_issue_pending(struct dma_chan *chan)
1076 {
1077         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1078         unsigned long           flags;
1079
1080         spin_lock_irqsave(&dwc->lock, flags);
1081         if (list_empty(&dwc->active_list))
1082                 dwc_dostart_first_queued(dwc);
1083         spin_unlock_irqrestore(&dwc->lock, flags);
1084 }
1085
1086 /*----------------------------------------------------------------------*/
1087
1088 static void dw_dma_off(struct dw_dma *dw)
1089 {
1090         unsigned int i;
1091
1092         dma_writel(dw, CFG, 0);
1093
1094         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1095         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1096         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1097         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1098         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1099
1100         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1101                 cpu_relax();
1102
1103         for (i = 0; i < dw->dma.chancnt; i++)
1104                 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
1105 }
1106
1107 static void dw_dma_on(struct dw_dma *dw)
1108 {
1109         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1110 }
1111
1112 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1113 {
1114         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1115         struct dw_dma           *dw = to_dw_dma(chan->device);
1116
1117         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1118
1119         /* ASSERT:  channel is idle */
1120         if (dma_readl(dw, CH_EN) & dwc->mask) {
1121                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1122                 return -EIO;
1123         }
1124
1125         dma_cookie_init(chan);
1126
1127         /*
1128          * NOTE: some controllers may have additional features that we
1129          * need to initialize here, like "scatter-gather" (which
1130          * doesn't mean what you think it means), and status writeback.
1131          */
1132
1133         /*
1134          * We need controller-specific data to set up slave transfers.
1135          */
1136         if (chan->private && !dw_dma_filter(chan, chan->private)) {
1137                 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1138                 return -EINVAL;
1139         }
1140
1141         /* Enable controller here if needed */
1142         if (!dw->in_use)
1143                 dw_dma_on(dw);
1144         dw->in_use |= dwc->mask;
1145
1146         return 0;
1147 }
1148
1149 static void dwc_free_chan_resources(struct dma_chan *chan)
1150 {
1151         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1152         struct dw_dma           *dw = to_dw_dma(chan->device);
1153         unsigned long           flags;
1154         LIST_HEAD(list);
1155
1156         dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1157                         dwc->descs_allocated);
1158
1159         /* ASSERT:  channel is idle */
1160         BUG_ON(!list_empty(&dwc->active_list));
1161         BUG_ON(!list_empty(&dwc->queue));
1162         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1163
1164         spin_lock_irqsave(&dwc->lock, flags);
1165
1166         /* Clear custom channel configuration */
1167         memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
1168
1169         clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
1170
1171         /* Disable interrupts */
1172         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1173         channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1174         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1175
1176         spin_unlock_irqrestore(&dwc->lock, flags);
1177
1178         /* Disable controller in case it was a last user */
1179         dw->in_use &= ~dwc->mask;
1180         if (!dw->in_use)
1181                 dw_dma_off(dw);
1182
1183         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1184 }
1185
1186 /* --------------------- Cyclic DMA API extensions -------------------- */
1187
1188 /**
1189  * dw_dma_cyclic_start - start the cyclic DMA transfer
1190  * @chan: the DMA channel to start
1191  *
1192  * Must be called with soft interrupts disabled. Returns zero on success or
1193  * -errno on failure.
1194  */
1195 int dw_dma_cyclic_start(struct dma_chan *chan)
1196 {
1197         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1198         struct dw_dma           *dw = to_dw_dma(chan->device);
1199         unsigned long           flags;
1200
1201         if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1202                 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1203                 return -ENODEV;
1204         }
1205
1206         spin_lock_irqsave(&dwc->lock, flags);
1207
1208         /* Enable interrupts to perform cyclic transfer */
1209         channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1210
1211         dwc_dostart(dwc, dwc->cdesc->desc[0]);
1212
1213         spin_unlock_irqrestore(&dwc->lock, flags);
1214
1215         return 0;
1216 }
1217 EXPORT_SYMBOL(dw_dma_cyclic_start);
1218
1219 /**
1220  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1221  * @chan: the DMA channel to stop
1222  *
1223  * Must be called with soft interrupts disabled.
1224  */
1225 void dw_dma_cyclic_stop(struct dma_chan *chan)
1226 {
1227         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1228         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1229         unsigned long           flags;
1230
1231         spin_lock_irqsave(&dwc->lock, flags);
1232
1233         dwc_chan_disable(dw, dwc);
1234
1235         spin_unlock_irqrestore(&dwc->lock, flags);
1236 }
1237 EXPORT_SYMBOL(dw_dma_cyclic_stop);
1238
1239 /**
1240  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1241  * @chan: the DMA channel to prepare
1242  * @buf_addr: physical DMA address where the buffer starts
1243  * @buf_len: total number of bytes for the entire buffer
1244  * @period_len: number of bytes for each period
1245  * @direction: transfer direction, to or from device
1246  *
1247  * Must be called before trying to start the transfer. Returns a valid struct
1248  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1249  */
1250 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1251                 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1252                 enum dma_transfer_direction direction)
1253 {
1254         struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
1255         struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
1256         struct dw_cyclic_desc           *cdesc;
1257         struct dw_cyclic_desc           *retval = NULL;
1258         struct dw_desc                  *desc;
1259         struct dw_desc                  *last = NULL;
1260         u8                              lms = DWC_LLP_LMS(dwc->dws.m_master);
1261         unsigned long                   was_cyclic;
1262         unsigned int                    reg_width;
1263         unsigned int                    periods;
1264         unsigned int                    i;
1265         unsigned long                   flags;
1266
1267         spin_lock_irqsave(&dwc->lock, flags);
1268         if (dwc->nollp) {
1269                 spin_unlock_irqrestore(&dwc->lock, flags);
1270                 dev_dbg(chan2dev(&dwc->chan),
1271                                 "channel doesn't support LLP transfers\n");
1272                 return ERR_PTR(-EINVAL);
1273         }
1274
1275         if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1276                 spin_unlock_irqrestore(&dwc->lock, flags);
1277                 dev_dbg(chan2dev(&dwc->chan),
1278                                 "queue and/or active list are not empty\n");
1279                 return ERR_PTR(-EBUSY);
1280         }
1281
1282         was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1283         spin_unlock_irqrestore(&dwc->lock, flags);
1284         if (was_cyclic) {
1285                 dev_dbg(chan2dev(&dwc->chan),
1286                                 "channel already prepared for cyclic DMA\n");
1287                 return ERR_PTR(-EBUSY);
1288         }
1289
1290         retval = ERR_PTR(-EINVAL);
1291
1292         if (unlikely(!is_slave_direction(direction)))
1293                 goto out_err;
1294
1295         dwc->direction = direction;
1296
1297         if (direction == DMA_MEM_TO_DEV)
1298                 reg_width = __ffs(sconfig->dst_addr_width);
1299         else
1300                 reg_width = __ffs(sconfig->src_addr_width);
1301
1302         periods = buf_len / period_len;
1303
1304         /* Check for too big/unaligned periods and unaligned DMA buffer. */
1305         if (period_len > (dwc->block_size << reg_width))
1306                 goto out_err;
1307         if (unlikely(period_len & ((1 << reg_width) - 1)))
1308                 goto out_err;
1309         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1310                 goto out_err;
1311
1312         retval = ERR_PTR(-ENOMEM);
1313
1314         cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1315         if (!cdesc)
1316                 goto out_err;
1317
1318         cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1319         if (!cdesc->desc)
1320                 goto out_err_alloc;
1321
1322         for (i = 0; i < periods; i++) {
1323                 desc = dwc_desc_get(dwc);
1324                 if (!desc)
1325                         goto out_err_desc_get;
1326
1327                 switch (direction) {
1328                 case DMA_MEM_TO_DEV:
1329                         lli_write(desc, dar, sconfig->dst_addr);
1330                         lli_write(desc, sar, buf_addr + period_len * i);
1331                         lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1332                                 | DWC_CTLL_DST_WIDTH(reg_width)
1333                                 | DWC_CTLL_SRC_WIDTH(reg_width)
1334                                 | DWC_CTLL_DST_FIX
1335                                 | DWC_CTLL_SRC_INC
1336                                 | DWC_CTLL_INT_EN));
1337
1338                         lli_set(desc, ctllo, sconfig->device_fc ?
1339                                         DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1340                                         DWC_CTLL_FC(DW_DMA_FC_D_M2P));
1341
1342                         break;
1343                 case DMA_DEV_TO_MEM:
1344                         lli_write(desc, dar, buf_addr + period_len * i);
1345                         lli_write(desc, sar, sconfig->src_addr);
1346                         lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1347                                 | DWC_CTLL_SRC_WIDTH(reg_width)
1348                                 | DWC_CTLL_DST_WIDTH(reg_width)
1349                                 | DWC_CTLL_DST_INC
1350                                 | DWC_CTLL_SRC_FIX
1351                                 | DWC_CTLL_INT_EN));
1352
1353                         lli_set(desc, ctllo, sconfig->device_fc ?
1354                                         DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1355                                         DWC_CTLL_FC(DW_DMA_FC_D_P2M));
1356
1357                         break;
1358                 default:
1359                         break;
1360                 }
1361
1362                 lli_write(desc, ctlhi, period_len >> reg_width);
1363                 cdesc->desc[i] = desc;
1364
1365                 if (last)
1366                         lli_write(last, llp, desc->txd.phys | lms);
1367
1368                 last = desc;
1369         }
1370
1371         /* Let's make a cyclic list */
1372         lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
1373
1374         dev_dbg(chan2dev(&dwc->chan),
1375                         "cyclic prepared buf %pad len %zu period %zu periods %d\n",
1376                         &buf_addr, buf_len, period_len, periods);
1377
1378         cdesc->periods = periods;
1379         dwc->cdesc = cdesc;
1380
1381         return cdesc;
1382
1383 out_err_desc_get:
1384         while (i--)
1385                 dwc_desc_put(dwc, cdesc->desc[i]);
1386 out_err_alloc:
1387         kfree(cdesc);
1388 out_err:
1389         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1390         return (struct dw_cyclic_desc *)retval;
1391 }
1392 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1393
1394 /**
1395  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1396  * @chan: the DMA channel to free
1397  */
1398 void dw_dma_cyclic_free(struct dma_chan *chan)
1399 {
1400         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1401         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1402         struct dw_cyclic_desc   *cdesc = dwc->cdesc;
1403         unsigned int            i;
1404         unsigned long           flags;
1405
1406         dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1407
1408         if (!cdesc)
1409                 return;
1410
1411         spin_lock_irqsave(&dwc->lock, flags);
1412
1413         dwc_chan_disable(dw, dwc);
1414
1415         dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1416         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1417         dma_writel(dw, CLEAR.XFER, dwc->mask);
1418
1419         spin_unlock_irqrestore(&dwc->lock, flags);
1420
1421         for (i = 0; i < cdesc->periods; i++)
1422                 dwc_desc_put(dwc, cdesc->desc[i]);
1423
1424         kfree(cdesc->desc);
1425         kfree(cdesc);
1426
1427         dwc->cdesc = NULL;
1428
1429         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1430 }
1431 EXPORT_SYMBOL(dw_dma_cyclic_free);
1432
1433 /*----------------------------------------------------------------------*/
1434
1435 int dw_dma_probe(struct dw_dma_chip *chip)
1436 {
1437         struct dw_dma_platform_data *pdata;
1438         struct dw_dma           *dw;
1439         bool                    autocfg = false;
1440         unsigned int            dw_params;
1441         unsigned int            i;
1442         int                     err;
1443
1444         dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1445         if (!dw)
1446                 return -ENOMEM;
1447
1448         dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1449         if (!dw->pdata)
1450                 return -ENOMEM;
1451
1452         dw->regs = chip->regs;
1453         chip->dw = dw;
1454
1455         pm_runtime_get_sync(chip->dev);
1456
1457         if (!chip->pdata) {
1458                 dw_params = dma_readl(dw, DW_PARAMS);
1459                 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1460
1461                 autocfg = dw_params >> DW_PARAMS_EN & 1;
1462                 if (!autocfg) {
1463                         err = -EINVAL;
1464                         goto err_pdata;
1465                 }
1466
1467                 /* Reassign the platform data pointer */
1468                 pdata = dw->pdata;
1469
1470                 /* Get hardware configuration parameters */
1471                 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1472                 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1473                 for (i = 0; i < pdata->nr_masters; i++) {
1474                         pdata->data_width[i] =
1475                                 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1476                 }
1477                 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1478
1479                 /* Fill platform data with the default values */
1480                 pdata->is_private = true;
1481                 pdata->is_memcpy = true;
1482                 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1483                 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1484         } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1485                 err = -EINVAL;
1486                 goto err_pdata;
1487         } else {
1488                 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1489
1490                 /* Reassign the platform data pointer */
1491                 pdata = dw->pdata;
1492         }
1493
1494         dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
1495                                 GFP_KERNEL);
1496         if (!dw->chan) {
1497                 err = -ENOMEM;
1498                 goto err_pdata;
1499         }
1500
1501         /* Calculate all channel mask before DMA setup */
1502         dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1503
1504         /* Force dma off, just in case */
1505         dw_dma_off(dw);
1506
1507         /* Create a pool of consistent memory blocks for hardware descriptors */
1508         dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1509                                          sizeof(struct dw_desc), 4, 0);
1510         if (!dw->desc_pool) {
1511                 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1512                 err = -ENOMEM;
1513                 goto err_pdata;
1514         }
1515
1516         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1517
1518         err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1519                           "dw_dmac", dw);
1520         if (err)
1521                 goto err_pdata;
1522
1523         INIT_LIST_HEAD(&dw->dma.channels);
1524         for (i = 0; i < pdata->nr_channels; i++) {
1525                 struct dw_dma_chan      *dwc = &dw->chan[i];
1526
1527                 dwc->chan.device = &dw->dma;
1528                 dma_cookie_init(&dwc->chan);
1529                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1530                         list_add_tail(&dwc->chan.device_node,
1531                                         &dw->dma.channels);
1532                 else
1533                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1534
1535                 /* 7 is highest priority & 0 is lowest. */
1536                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1537                         dwc->priority = pdata->nr_channels - i - 1;
1538                 else
1539                         dwc->priority = i;
1540
1541                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1542                 spin_lock_init(&dwc->lock);
1543                 dwc->mask = 1 << i;
1544
1545                 INIT_LIST_HEAD(&dwc->active_list);
1546                 INIT_LIST_HEAD(&dwc->queue);
1547
1548                 channel_clear_bit(dw, CH_EN, dwc->mask);
1549
1550                 dwc->direction = DMA_TRANS_NONE;
1551
1552                 /* Hardware configuration */
1553                 if (autocfg) {
1554                         unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1555                         void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1556                         unsigned int dwc_params = dma_readl_native(addr);
1557
1558                         dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1559                                            dwc_params);
1560
1561                         /*
1562                          * Decode maximum block size for given channel. The
1563                          * stored 4 bit value represents blocks from 0x00 for 3
1564                          * up to 0x0a for 4095.
1565                          */
1566                         dwc->block_size =
1567                                 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
1568                         dwc->nollp =
1569                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1570                 } else {
1571                         dwc->block_size = pdata->block_size;
1572                         dwc->nollp = pdata->is_nollp;
1573                 }
1574         }
1575
1576         /* Clear all interrupts on all channels. */
1577         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1578         dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1579         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1580         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1581         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1582
1583         /* Set capabilities */
1584         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1585         if (pdata->is_private)
1586                 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1587         if (pdata->is_memcpy)
1588                 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1589
1590         dw->dma.dev = chip->dev;
1591         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1592         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1593
1594         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1595         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1596
1597         dw->dma.device_config = dwc_config;
1598         dw->dma.device_pause = dwc_pause;
1599         dw->dma.device_resume = dwc_resume;
1600         dw->dma.device_terminate_all = dwc_terminate_all;
1601
1602         dw->dma.device_tx_status = dwc_tx_status;
1603         dw->dma.device_issue_pending = dwc_issue_pending;
1604
1605         /* DMA capabilities */
1606         dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1607         dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1608         dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1609                              BIT(DMA_MEM_TO_MEM);
1610         dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1611
1612         err = dma_async_device_register(&dw->dma);
1613         if (err)
1614                 goto err_dma_register;
1615
1616         dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1617                  pdata->nr_channels);
1618
1619         pm_runtime_put_sync_suspend(chip->dev);
1620
1621         return 0;
1622
1623 err_dma_register:
1624         free_irq(chip->irq, dw);
1625 err_pdata:
1626         pm_runtime_put_sync_suspend(chip->dev);
1627         return err;
1628 }
1629 EXPORT_SYMBOL_GPL(dw_dma_probe);
1630
1631 int dw_dma_remove(struct dw_dma_chip *chip)
1632 {
1633         struct dw_dma           *dw = chip->dw;
1634         struct dw_dma_chan      *dwc, *_dwc;
1635
1636         pm_runtime_get_sync(chip->dev);
1637
1638         dw_dma_off(dw);
1639         dma_async_device_unregister(&dw->dma);
1640
1641         free_irq(chip->irq, dw);
1642         tasklet_kill(&dw->tasklet);
1643
1644         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1645                         chan.device_node) {
1646                 list_del(&dwc->chan.device_node);
1647                 channel_clear_bit(dw, CH_EN, dwc->mask);
1648         }
1649
1650         pm_runtime_put_sync_suspend(chip->dev);
1651         return 0;
1652 }
1653 EXPORT_SYMBOL_GPL(dw_dma_remove);
1654
1655 int dw_dma_disable(struct dw_dma_chip *chip)
1656 {
1657         struct dw_dma *dw = chip->dw;
1658
1659         dw_dma_off(dw);
1660         return 0;
1661 }
1662 EXPORT_SYMBOL_GPL(dw_dma_disable);
1663
1664 int dw_dma_enable(struct dw_dma_chip *chip)
1665 {
1666         struct dw_dma *dw = chip->dw;
1667
1668         dw_dma_on(dw);
1669         return 0;
1670 }
1671 EXPORT_SYMBOL_GPL(dw_dma_enable);
1672
1673 MODULE_LICENSE("GPL v2");
1674 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1675 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1676 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");