/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER           0x00
#define CPDMA_TXCONTROL         0x04
#define CPDMA_TXTEARDOWN        0x08
#define CPDMA_RXIDVER           0x10
#define CPDMA_RXCONTROL         0x14
#define CPDMA_SOFTRESET         0x1c
#define CPDMA_RXTEARDOWN        0x18
#define CPDMA_TX_PRI0_RATE      0x30
#define CPDMA_TXINTSTATRAW      0x80
#define CPDMA_TXINTSTATMASKED   0x84
#define CPDMA_TXINTMASKSET      0x88
#define CPDMA_TXINTMASKCLEAR    0x8c
#define CPDMA_MACINVECTOR       0x90
#define CPDMA_MACEOIVECTOR      0x94
#define CPDMA_RXINTSTATRAW      0xa0
#define CPDMA_RXINTSTATMASKED   0xa4
#define CPDMA_RXINTMASKSET      0xa8
#define CPDMA_RXINTMASKCLEAR    0xac
#define CPDMA_DMAINTSTATRAW     0xb0
#define CPDMA_DMAINTSTATMASKED  0xb4
#define CPDMA_DMAINTMASKSET     0xb8
#define CPDMA_DMAINTMASKCLEAR   0xbc
#define CPDMA_DMAINT_HOSTERR    BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL        0x20
#define CPDMA_DMASTATUS         0x24
#define CPDMA_RXBUFFOFS         0x28
#define CPDMA_EM_CONTROL        0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP          BIT(31)
#define CPDMA_DESC_EOP          BIT(30)
#define CPDMA_DESC_OWNER        BIT(29)
#define CPDMA_DESC_EOQ          BIT(28)
#define CPDMA_DESC_TD_COMPLETE  BIT(27)
#define CPDMA_DESC_PASS_CRC     BIT(26)
#define CPDMA_DESC_TO_PORT_EN   BIT(20)
#define CPDMA_TO_PORT_SHIFT     16
#define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN      4

#define CPDMA_TEARDOWN_VALUE    0xfffffffc

#define CPDMA_MAX_RLIM_CNT      16384

struct cpdma_desc {
        /* hardware fields */
        u32                     hw_next;
        u32                     hw_buffer;
        u32                     hw_len;
        u32                     hw_mode;
        /* software fields */
        void                    *sw_token;
        u32                     sw_buffer;
        u32                     sw_len;
};

struct cpdma_desc_pool {
        phys_addr_t             phys;
        dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
        int                     num_desc;
        struct device           *dev;
        struct gen_pool         *gen_pool;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
        enum cpdma_state        state;
        struct cpdma_params     params;
        struct device           *dev;
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
        int chan_num;
        int                     num_rx_desc; /* RX descriptors number */
        int                     num_tx_desc; /* TX descriptors number */
};

struct cpdma_chan {
        struct cpdma_desc __iomem       *head, *tail;
        void __iomem                    *hdp, *cp, *rxfree;
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
        int                             count;
        u32                             desc_num;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
        struct cpdma_chan_stats         stats;
        /* offsets into dmaregs */
        int     int_set, int_clear, td;
        int                             weight;
        u32                             rate_factor;
        u32                             rate;
};

struct cpdma_control_info {
        u32             reg;
        u32             shift, mask;
        int             access;
#define ACCESS_RO       BIT(0)
#define ACCESS_WO       BIT(1)
#define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
        [CPDMA_TX_RLIM]           = {CPDMA_DMACONTROL,  8,  0xffff, ACCESS_RW},
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)       (chan)
#define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)        (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)       __chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs         params.dmaregs
#define num_chan        params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)         readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            readl((chan)->fld)
#define desc_read(desc, fld)            readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        writel((u32)(v), &(desc)->fld)

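/* Added note: for a directed (port-specific) transmit to slave port 1 or 2,
 * the macro below sets CPDMA_DESC_TO_PORT_EN and encodes the destination
 * port in the descriptor mode word; RX channels and non-directed packets
 * are left untouched.
 */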
#define cpdma_desc_to_port(chan, mode, directed)                        \
        do {                                                            \
                if (!is_rx_chan(chan) && ((directed == 1) ||            \
                                          (directed == 2)))             \
                        mode |= (CPDMA_DESC_TO_PORT_EN |                \
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
        struct cpdma_desc_pool *pool = ctlr->pool;

        if (!pool)
                return;

        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             "cpdma_desc_pool size %zd != avail %zd",
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumap)
                dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
        int ret = -ENOMEM;

        pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;
        ctlr->pool = pool;

        pool->mem_size  = cpdma_params->desc_mem_size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
                                cpdma_params->desc_align);
        pool->num_desc  = pool->mem_size / pool->desc_size;

        if (cpdma_params->descs_pool_size) {
                /* Recalculate the memory size required for the cpdma
                 * descriptor pool based on the number of descriptors
                 * specified by the user; if it exceeds the CPPI internal
                 * RAM size (desc_mem_size), switch to DDR.
                 */
                pool->num_desc = cpdma_params->descs_pool_size;
                pool->mem_size = pool->desc_size * pool->num_desc;
                if (pool->mem_size > cpdma_params->desc_mem_size)
                        cpdma_params->desc_mem_phys = 0;
        }

        pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
                                              -1, "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                ret = PTR_ERR(pool->gen_pool);
                dev_err(ctlr->dev, "pool create failed %d\n", ret);
                goto gen_pool_create_fail;
        }

        if (cpdma_params->desc_mem_phys) {
                pool->phys  = cpdma_params->desc_mem_phys;
                pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
                                           pool->mem_size);
                pool->hw_addr = cpdma_params->desc_hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
                                                  &pool->hw_addr, GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(ctlr->dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return 0;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
        ctlr->pool = NULL;
        return ret;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                  struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;
        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        return (struct cpdma_desc __iomem *)
                gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        struct cpdma_control_info *info = &controls[control];
        u32 val;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_WO) != ACCESS_WO)
                return -EPERM;

        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);

        return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        struct cpdma_control_info *info = &controls[control];
        int ret;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_RO) != ACCESS_RO)
                return -EPERM;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
        return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        u32 rate_reg;
        u32 rmask;
        int ret;

        if (!chan->rate)
                return 0;

        rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
        dma_reg_write(ctlr, rate_reg, chan->rate_factor);

        rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
        rmask |= chan->mask;

        ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
        return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success or a negative error code.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
                               u32 *rmask, int *prio_mode)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        struct cpdma_chan *chan;
        u32 old_rate = ch->rate;
        u32 new_rmask = 0;
        int rlim = 0;
        int i;

        for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan == ch)
                        chan->rate = rate;

                if (chan->rate) {
                        rlim = 1;
                        new_rmask |= chan->mask;
                        continue;
                }

                if (rlim)
                        goto err;
        }

        *rmask = new_rmask;
        *prio_mode = rlim;
        return 0;

err:
        ch->rate = old_rate;
        dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
                chan->chan_num);
        return -EINVAL;
}

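/* Added note on the search below: the driver models the resulting rate as
 * freq * idle_cnt / (idle_cnt + send_cnt), with freq = bus_freq_mhz * 1000 * 32,
 * and walks the count space (bounded by CPDMA_MAX_RLIM_CNT) for the pair
 * whose rate is closest to, but not below, the requested one.  The winning
 * pair is programmed into the per-channel rate register as
 * send_cnt | (idle_cnt << 16).
 */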
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
                                  struct cpdma_chan *ch)
{
        u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
        u32 best_send_cnt = 0, best_idle_cnt = 0;
        u32 new_rate, best_rate = 0, rate_reg;
        u64 send_cnt, idle_cnt;
        u32 min_send_cnt, freq;
        u64 divident, divisor;

        if (!ch->rate) {
                ch->rate_factor = 0;
                goto set_factor;
        }

        freq = ctlr->params.bus_freq_mhz * 1000 * 32;
        if (!freq) {
                dev_err(ctlr->dev, "The bus frequency is not set\n");
                return -EINVAL;
        }

        min_send_cnt = freq - ch->rate;
        send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
        while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
                divident = ch->rate * send_cnt;
                divisor = min_send_cnt;
                idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);

                divident = freq * idle_cnt;
                divisor = idle_cnt + send_cnt;
                new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);

                delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
                if (delta < best_delta) {
                        best_delta = delta;
                        best_send_cnt = send_cnt;
                        best_idle_cnt = idle_cnt;
                        best_rate = new_rate;

                        if (!delta)
                                break;
                }

                if (prev_delta >= delta) {
                        prev_delta = delta;
                        send_cnt++;
                        continue;
                }

                idle_cnt++;
                divident = freq * idle_cnt;
                send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
                send_cnt -= idle_cnt;
                prev_delta = UINT_MAX;
        }

        ch->rate = best_rate;
        ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
        rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
        dma_reg_write(ctlr, rate_reg, ch->rate_factor);
        return 0;
}

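/* Added note on the controller lifecycle, as used by callers such as cpsw
 * and davinci_emac: cpdma_ctlr_create(), then cpdma_chan_create() for each
 * RX/TX channel, then cpdma_ctlr_start(); at runtime packets go through
 * cpdma_chan_submit() and completions through cpdma_chan_process();
 * cpdma_ctlr_stop() and cpdma_ctlr_destroy() tear everything down.
 */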
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        ctlr->chan_num = 0;
        spin_lock_init(&ctlr->lock);

        if (cpdma_desc_pool_create(ctlr))
                return NULL;
        /* split pool equally between RX/TX by default */
        ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
        ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        struct cpdma_chan *chan;
        unsigned long flags;
        int i, prio_mode;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                writel(0, ctlr->params.txhdp + 4 * i);
                writel(0, ctlr->params.rxhdp + 4 * i);
                writel(0, ctlr->params.txcp + 4 * i);
                writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        prio_mode = 0;
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (chan) {
                        cpdma_chan_set_chan_shaper(chan);
                        cpdma_chan_on(chan);

                        /* turn off fixed prio mode only if all tx channels are rate limited */
                        if (is_tx_chan(chan) && !chan->rate)
                                prio_mode = 1;
                }
        }

        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
        _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;
        spin_unlock_irqrestore(&ctlr->lock, flags);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        spin_lock_irqsave(&ctlr->lock, flags);
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

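/* Added note: the helper below distributes desc_num descriptors among the RX
 * or TX channels; a weighted channel gets weight% of the pool, an unweighted
 * one gets per_ch_desc, and whatever is left over is handed to the channel
 * with the largest share.
 */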
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
                                 int rx, int desc_num,
                                 int per_ch_desc)
{
        struct cpdma_chan *chan, *most_chan = NULL;
        int desc_cnt = desc_num;
        int most_dnum = 0;
        int min, max, i;

        if (!desc_num)
                return;

        if (rx) {
                min = rx_chan_num(0);
                max = rx_chan_num(CPDMA_MAX_CHANNELS);
        } else {
                min = tx_chan_num(0);
                max = tx_chan_num(CPDMA_MAX_CHANNELS);
        }

        for (i = min; i < max; i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan->weight)
                        chan->desc_num = (chan->weight * desc_num) / 100;
                else
                        chan->desc_num = per_ch_desc;

                desc_cnt -= chan->desc_num;

                if (most_dnum < chan->desc_num) {
                        most_dnum = chan->desc_num;
                        most_chan = chan;
                }
        }
        /* give the remainder to the channel with the largest share */
        if (most_chan)
                most_chan->desc_num += desc_cnt;
}

/**
 * cpdma_chan_split_pool - Splits the controller's descriptor pool between
 * all channels.  Has to be called under ctlr lock.
 */
int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
        int free_rx_num = 0, free_tx_num = 0;
        int rx_weight = 0, tx_weight = 0;
        int tx_desc_num, rx_desc_num;
        struct cpdma_chan *chan;
        int i;

        if (!ctlr->chan_num)
                return 0;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (is_rx_chan(chan)) {
                        if (!chan->weight)
                                free_rx_num++;
                        rx_weight += chan->weight;
                } else {
                        if (!chan->weight)
                                free_tx_num++;
                        tx_weight += chan->weight;
                }
        }

        if (rx_weight > 100 || tx_weight > 100)
                return -EINVAL;

        tx_desc_num = ctlr->num_tx_desc;
        rx_desc_num = ctlr->num_rx_desc;

        if (free_tx_num) {
                tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
                tx_per_ch_desc /= free_tx_num;
        }
        if (free_rx_num) {
                rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
                rx_per_ch_desc /= free_rx_num;
        }

        cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
        cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);


/* cpdma_chan_set_weight - set the weight of a channel as a percentage.
 * Tx and Rx channels have separate weights, i.e. 100% for RX and 100% for Tx.
 * The weight is used to split cpdma resources, including the number of
 * descriptors, between the channels in the required proportion.  The channel
 * rate alone is not enough to derive a weight, since the maximum rate of the
 * interface would also be needed.  If weight = 0, the channel uses whatever
 * descriptors are left over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        unsigned long flags, ch_flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);
        if (ch->weight == weight) {
                spin_unlock_irqrestore(&ch->lock, ch_flags);
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return 0;
        }
        ch->weight = weight;
        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* re-split pool using new channel weight */
        ret = cpdma_chan_split_pool(ctlr);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
        unsigned int divident, divisor;

        divident = ctlr->params.bus_freq_mhz * 32 * 1000;
        divisor = 1 + CPDMA_MAX_RLIM_CNT;

        return DIV_ROUND_UP(divident, divisor);
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);

/* cpdma_chan_set_rate - limit bandwidth for a transmit channel.
 * The bandwidth-limited channels have to be in order, beginning from the
 * lowest one.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is turned off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
        unsigned long flags, ch_flags;
        struct cpdma_ctlr *ctlr;
        int ret, prio_mode;
        u32 rmask;

        if (!ch || !is_tx_chan(ch))
                return -EINVAL;

        if (ch->rate == rate)
                return rate;

        ctlr = ch->ctlr;
        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);

        ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
        if (ret)
                goto err;

        ret = cpdma_chan_set_factors(ctlr, ch);
        if (ret)
                goto err;

        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* on shapers */
        _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;

err:
        spin_unlock_irqrestore(&ch->lock, ch_flags);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
        unsigned long flags;
        u32 rate;

        spin_lock_irqsave(&ch->lock, flags);
        rate = ch->rate;
        spin_unlock_irqrestore(&ch->lock, flags);

        return rate;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler, int rx_type)
{
        int offset = chan_num * 4;
        struct cpdma_chan *chan;
        unsigned long flags;

        chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return ERR_PTR(-EINVAL);

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->rate      = 0;
        chan->weight    = 0;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        ctlr->chan_num++;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
        unsigned long flags;
        int desc_num;

        spin_lock_irqsave(&chan->lock, flags);
        desc_num = chan->desc_num;
        spin_unlock_irqrestore(&chan->lock, flags);

        return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        ctlr->chan_num--;
        devm_kfree(ctlr->dev, chan);
        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;
        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

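/* Added note: the helper below chains a filled descriptor onto the channel's
 * queue.  If the queue was empty, the descriptor becomes the new head and HDP
 * is written directly.  Otherwise it is linked after the old tail; if the
 * hardware had already reached end-of-queue there (EOQ set, OWNER clear), the
 * queue is restarted by rewriting HDP and the event is counted as a misqueue.
 */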
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *prev = chan->tail;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        u32                             mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        unsigned long                   flags;
        u32                             mode;
        int                             ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        if (chan->count >= chan->desc_num)      {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        ret = dma_mapping_error(ctlr->dev, buffer);
        if (ret) {
                cpdma_desc_free(ctlr->pool, desc, 1);
                ret = -EINVAL;
                goto unlock_ret;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);

        /* Relaxed IO accessors can be used here as there is a read barrier
         * at the end of the write sequence.
         */
        writel_relaxed(0, &desc->hw_next);
        writel_relaxed(buffer, &desc->hw_buffer);
        writel_relaxed(len, &desc->hw_len);
        writel_relaxed(mode | len, &desc->hw_mode);
        writel_relaxed((uintptr_t)token, &desc->sw_token);
        writel_relaxed(buffer, &desc->sw_buffer);
        writel_relaxed(len, &desc->sw_len);
        desc_read(desc, sw_len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        bool                    free_tx_desc;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                         gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      buff_dma;
        int                             origlen;
        uintptr_t                       token;

        token      = desc_read(desc, sw_token);
        buff_dma   = desc_read(desc, sw_buffer);
        origlen    = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)((void *)token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        int                             status, outlen;
        int                             cb_status = 0;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        unsigned long                   flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status  = desc_read(desc, hw_mode);
        outlen  = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                            CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if ((status & CPDMA_DESC_EOQ) && chan->head) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

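/* Added note: the function below processes up to "quota" completed
 * descriptors on a channel, invoking the registered handler for each one,
 * and returns the number processed; it is typically called from the client
 * driver's NAPI poll handler.
 */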
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = cpdma_chan_set_chan_shaper(chan);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        if (ret)
                return ret;

        ret = cpdma_chan_on(chan);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;
        int                     ret;
        unsigned                timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);
                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_get(ctlr, control);
        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_set(ctlr, control, value);
        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
        return ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
        return ctlr->num_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);

void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
        ctlr->num_rx_desc = num_rx_desc;
        ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);

MODULE_LICENSE("GPL");