Mention branches and keyring.
[releases.git] / xilinx / xdma.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * DMA driver for Xilinx DMA/Bridge Subsystem
4  *
5  * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
6  * Copyright (C) 2022, Advanced Micro Devices, Inc.
7  */
8
9 /*
10  * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
11  * between Host memory and the DMA subsystem. It does this by operating on
12  * 'descriptors' that contain information about the source, destination and
13  * amount of data to transfer. These direct memory transfers can be both in
14  * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
15  * configured to have a single AXI4 Master interface shared by all channels
16  * or one AXI4-Stream interface for each channel enabled. Memory transfers are
17  * specified on a per-channel basis in descriptor linked lists, which the DMA
18  * fetches from host memory and processes. Events such as descriptor completion
19  * and errors are signaled using interrupts. The core also provides up to 16
20  * user interrupt wires that generate interrupts to the host.
21  */
22
23 #include <linux/mod_devicetable.h>
24 #include <linux/bitfield.h>
25 #include <linux/dmapool.h>
26 #include <linux/regmap.h>
27 #include <linux/dmaengine.h>
28 #include <linux/dma/amd_xdma.h>
29 #include <linux/platform_device.h>
30 #include <linux/platform_data/amd_xdma.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/pci.h>
33 #include "../virt-dma.h"
34 #include "xdma-regs.h"
35
/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,		/* 32-bit register offsets */
	.val_bits = 32,		/* 32-bit register values */
	.reg_stride = 4,	/* registers are word aligned */
	/*
	 * NOTE(review): regmap's .max_register is the highest *valid* offset;
	 * confirm XDMA_REG_SPACE_LEN is not one-past-the-end of the space.
	 */
	.max_register = XDMA_REG_SPACE_LEN,
};
43
/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 *
 * One block holds XDMA_DESC_ADJACENT hardware descriptors allocated
 * contiguously from the channel's DMA pool (see xdma_alloc_desc()).
 */
struct xdma_desc_block {
	void		*virt_addr;
	dma_addr_t	dma_addr;
};
53
/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure (opaque struct xdma_device *)
 * @base: Byte offset of the channel's register block within the regmap
 * @desc_pool: Descriptor pool (created in xdma_alloc_chan_resources())
 * @busy: Busy flag of the channel; set while a transfer is in flight,
 *        protected by vchan.lock
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
};
75
/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @dev_addr: Physical address on DMA device side
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors (updated from the
 *                      XDMA_CHAN_COMPLETED_DESC register in the channel ISR)
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	u64				dev_addr;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
	bool				cyclic;
	u32				periods;
	u32				period_size;
};
103
104 #define XDMA_DEV_STATUS_REG_DMA         BIT(0)
105 #define XDMA_DEV_STATUS_INIT_MSIX       BIT(1)
106
/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels (devm-allocated array)
 * @c2h_chans: Card to Host channels (devm-allocated array)
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQ assigned to device
 * @status: Initialization status (XDMA_DEV_STATUS_* flag bits)
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};
132
/* Log an error against the underlying platform device */
#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
/* Total channel count (H2C + C2H); evaluates _xd only once */
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
138
139 /* Get the last desc in a desc block */
140 static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
141 {
142         return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
143 }
144
/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 *
 * Chains the hardware descriptor blocks of a scatter-gather request: the
 * last descriptor of each block gets a next-pointer to the following block
 * plus an adjacent-descriptor count in its control word, and the final
 * descriptor of the whole request is marked LAST.
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			/*
			 * Block index crosses a block-group boundary: end the
			 * chain here instead of linking onward.
			 * NOTE(review): presumably the engine is restarted per
			 * group by the ISR (see xdma_xfer_start) — confirm
			 * against the XDMA spec.
			 */
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		/* point this block's last descriptor at the next block */
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		/* second-to-last block: adjacent count shrinks to the tail */
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	/* terminate the final (possibly partial) block */
	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
182
183 /**
184  * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
185  * @sw_desc: Tx descriptor pointer
186  */
187 static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
188 {
189         struct xdma_desc_block *block;
190         struct xdma_hw_desc *desc;
191         int i;
192
193         block = sw_desc->desc_blocks;
194         for (i = 0; i < sw_desc->desc_num - 1; i++) {
195                 desc = block->virt_addr + i * XDMA_DESC_SIZE;
196                 desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
197         }
198         desc = block->virt_addr + i * XDMA_DESC_SIZE;
199         desc->next_desc = cpu_to_le64(block->dma_addr);
200 }
201
/* Convert a generic dma_chan to the driver-private channel structure */
static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}
206
/* Convert a virt-dma descriptor to the driver-private descriptor */
static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}
211
212 /**
213  * xdma_channel_init - Initialize DMA channel registers
214  * @chan: DMA channel pointer
215  */
216 static int xdma_channel_init(struct xdma_chan *chan)
217 {
218         struct xdma_device *xdev = chan->xdev_hdl;
219         int ret;
220
221         ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
222                            CHAN_CTRL_NON_INCR_ADDR);
223         if (ret)
224                 return ret;
225
226         ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
227                            CHAN_IM_ALL);
228         if (ret)
229                 return ret;
230
231         return 0;
232 }
233
234 /**
235  * xdma_free_desc - Free descriptor
236  * @vdesc: Virtual DMA descriptor
237  */
238 static void xdma_free_desc(struct virt_dma_desc *vdesc)
239 {
240         struct xdma_desc *sw_desc;
241         int i;
242
243         sw_desc = to_xdma_desc(vdesc);
244         for (i = 0; i < sw_desc->dblk_num; i++) {
245                 if (!sw_desc->desc_blocks[i].virt_addr)
246                         break;
247                 dma_pool_free(sw_desc->chan->desc_pool,
248                               sw_desc->desc_blocks[i].virt_addr,
249                               sw_desc->desc_blocks[i].dma_addr);
250         }
251         kfree(sw_desc->desc_blocks);
252         kfree(sw_desc);
253 }
254
/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 *
 * Allocates the software descriptor and enough hardware descriptor blocks
 * (XDMA_DESC_ADJACENT descriptors per block) from the channel's DMA pool,
 * pre-fills every control word, then links the blocks as a cyclic ring or
 * an SG chain. Allocations use GFP_NOWAIT — presumably because the prep
 * path must not sleep; confirm against dmaengine context rules.
 *
 * Return: the new descriptor, or NULL on allocation failure.
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	u32 control;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	sw_desc->cyclic = cyclic;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	/* default control word applied to every descriptor up front */
	if (cyclic)
		control = XDMA_DESC_CONTROL_CYCLIC;
	else
		control = XDMA_DESC_CONTROL(1, 0);

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(control);
	}

	if (cyclic)
		xdma_link_cyclic_desc_blocks(sw_desc);
	else
		xdma_link_sg_desc_blocks(sw_desc);

	return sw_desc;

failed:
	/* xdma_free_desc() stops at the first NULL virt_addr, so a
	 * partially-populated block array is released correctly */
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}
313
/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 *
 * Programs the SGDMA engine with the first not-yet-completed descriptor
 * block of the next queued request and kicks off the transfer. Caller
 * must hold vchan.lock.
 *
 * Return: 0 on success, -EINVAL if nothing is queued / channel busy /
 * direction mismatch, or a negative regmap error code.
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * check if there is not any submitted descriptor or channel is busy.
	 * vchan lock should be held where this function is called.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	/*
	 * adjacent-descriptor hint: a full block for intermediate blocks,
	 * only the remaining tail count for the final block
	 */
	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	return 0;
}
376
/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 *
 * Two-pass probe: first count the channels of the requested direction by
 * reading each candidate's identifier register, then allocate the channel
 * array and initialize every matching channel.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	/* select the register base and bookkeeping for this direction */
	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if it is available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "does not probe any channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	/* second pass: bind each detected channel (i) to an array slot (j) */
	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		/* hardware reported more channels than the first pass saw */
		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}
462
463 /**
464  * xdma_issue_pending - Issue pending transactions
465  * @chan: DMA channel pointer
466  */
467 static void xdma_issue_pending(struct dma_chan *chan)
468 {
469         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
470         unsigned long flags;
471
472         spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
473         if (vchan_issue_pending(&xdma_chan->vchan))
474                 xdma_xfer_start(xdma_chan);
475         spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
476 }
477
/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Splits every SG entry into chunks of at most XDMA_DESC_BLEN_MAX bytes
 * and emits one hardware descriptor per chunk. Returns NULL on failure.
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	u32 desc_num = 0, i, len, rest;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	u64 dev_addr, *src, *dst;
	struct scatterlist *sg;
	u64 addr;

	/* count hardware descriptors needed after BLEN_MAX splitting */
	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;

	/*
	 * alias src/dst onto the host (addr) and device (dev_addr) sides so
	 * the fill loop below is direction-agnostic
	 */
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;
	desc_num = 1;	/* reused as a 1-based running descriptor counter */
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		rest = sg_dma_len(sg);

		do {
			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
			/* set hardware descriptor */
			desc->bytes = cpu_to_le32(len);
			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);

			/* advance to the next block once this one is full */
			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
				dblk++;
				desc = dblk->virt_addr;
			} else {
				desc++;
			}

			desc_num++;
			dev_addr += len;
			addr += len;
			rest -= len;
		} while (rest);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}
559
560 /**
561  * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
562  * @chan: DMA channel pointer
563  * @address: Device DMA address to access
564  * @size: Total length to transfer
565  * @period_size: Period size to use for each transfer
566  * @dir: Transfer direction
567  * @flags: Transfer ack flags
568  */
569 static struct dma_async_tx_descriptor *
570 xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
571                      size_t size, size_t period_size,
572                      enum dma_transfer_direction dir,
573                      unsigned long flags)
574 {
575         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
576         struct xdma_device *xdev = xdma_chan->xdev_hdl;
577         unsigned int periods = size / period_size;
578         struct dma_async_tx_descriptor *tx_desc;
579         struct xdma_desc_block *dblk;
580         struct xdma_hw_desc *desc;
581         struct xdma_desc *sw_desc;
582         unsigned int i;
583
584         /*
585          * Simplify the whole logic by preventing an abnormally high number of
586          * periods and periods size.
587          */
588         if (period_size > XDMA_DESC_BLEN_MAX) {
589                 xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
590                 return NULL;
591         }
592
593         if (periods > XDMA_DESC_ADJACENT) {
594                 xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
595                 return NULL;
596         }
597
598         sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
599         if (!sw_desc)
600                 return NULL;
601
602         sw_desc->periods = periods;
603         sw_desc->period_size = period_size;
604         sw_desc->dir = dir;
605
606         dblk = sw_desc->desc_blocks;
607         desc = dblk->virt_addr;
608
609         /* fill hardware descriptor */
610         for (i = 0; i < periods; i++) {
611                 desc->bytes = cpu_to_le32(period_size);
612                 if (dir == DMA_MEM_TO_DEV) {
613                         desc->src_addr = cpu_to_le64(address + i * period_size);
614                         desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
615                 } else {
616                         desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
617                         desc->dst_addr = cpu_to_le64(address + i * period_size);
618                 }
619
620                 desc++;
621         }
622
623         tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
624         if (!tx_desc)
625                 goto failed;
626
627         return tx_desc;
628
629 failed:
630         xdma_free_desc(&sw_desc->vdesc);
631
632         return NULL;
633 }
634
635 /**
636  * xdma_device_config - Configure the DMA channel
637  * @chan: DMA channel
638  * @cfg: channel configuration
639  */
640 static int xdma_device_config(struct dma_chan *chan,
641                               struct dma_slave_config *cfg)
642 {
643         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
644
645         memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));
646
647         return 0;
648 }
649
650 /**
651  * xdma_free_chan_resources - Free channel resources
652  * @chan: DMA channel
653  */
654 static void xdma_free_chan_resources(struct dma_chan *chan)
655 {
656         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
657
658         vchan_free_chan_resources(&xdma_chan->vchan);
659         dma_pool_destroy(xdma_chan->desc_pool);
660         xdma_chan->desc_pool = NULL;
661 }
662
663 /**
664  * xdma_alloc_chan_resources - Allocate channel resources
665  * @chan: DMA channel
666  */
667 static int xdma_alloc_chan_resources(struct dma_chan *chan)
668 {
669         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
670         struct xdma_device *xdev = xdma_chan->xdev_hdl;
671         struct device *dev = xdev->dma_dev.dev;
672
673         while (dev && !dev_is_pci(dev))
674                 dev = dev->parent;
675         if (!dev) {
676                 xdma_err(xdev, "unable to find pci device");
677                 return -EINVAL;
678         }
679
680         xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
681                                                dev, XDMA_DESC_BLOCK_SIZE,
682                                                XDMA_DESC_BLOCK_ALIGN, 0);
683         if (!xdma_chan->desc_pool) {
684                 xdma_err(xdev, "unable to allocate descriptor pool");
685                 return -ENOMEM;
686         }
687
688         return 0;
689 }
690
/**
 * xdma_tx_status - Check transaction status and residue
 * @chan: DMA channel
 * @cookie: Transaction cookie to query
 * @state: Transaction state output (residue is filled for cyclic only)
 *
 * For cyclic transfers still in flight, reports the residue as the bytes
 * remaining until the ring wraps, derived from the completed-descriptor
 * count recorded by the ISR.
 */
static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (vd)
		desc = to_xdma_desc(vd);
	/* residue reporting below only applies to cyclic descriptors */
	if (!desc || !desc->cyclic) {
		spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
		return ret;
	}

	/* current position in the ring, in periods */
	period_idx = desc->completed_desc_num % desc->periods;
	residue = (desc->periods - period_idx) * desc->period_size;

	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	dma_set_residue(state, residue);

	return ret;
}
725
/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 *
 * Reads the hardware completed-descriptor count and either (cyclic)
 * fires the period callback, or (SG) completes the request when all
 * descriptors are done / restarts the engine for the next block group.
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev;
	struct virt_dma_desc *vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	xchan->busy = false;
	desc = to_xdma_desc(vd);
	xdev = xchan->xdev_hdl;

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	if (desc->cyclic) {
		/* hardware count is authoritative for the ring position */
		desc->completed_desc_num = complete_desc_num;

		/* read-then-write-back acknowledges the channel status */
		ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
				  &st);
		if (ret)
			goto out;

		regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);

		vchan_cyclic_callback(vd);
		goto out;
	}

	/* SG path: accumulate progress across engine restarts */
	desc->completed_desc_num += complete_desc_num;

	/*
	 * if all data blocks are transferred, remove and complete the request
	 */
	if (desc->completed_desc_num == desc->desc_num) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
		goto out;
	}

	/*
	 * anything other than a full block group completed means an
	 * unexpected state; leave the request queued untouched
	 */
	if (desc->completed_desc_num > desc->desc_num ||
	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
		goto out;

	/* transfer the rest of data (SG only) */
	xdma_xfer_start(xchan);

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}
793
794 /**
795  * xdma_irq_fini - Uninitialize IRQ
796  * @xdev: DMA device pointer
797  */
798 static void xdma_irq_fini(struct xdma_device *xdev)
799 {
800         int i;
801
802         /* disable interrupt */
803         regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
804
805         /* free irq handler */
806         for (i = 0; i < xdev->h2c_chan_num; i++)
807                 free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
808
809         for (i = 0; i < xdev->c2h_chan_num; i++)
810                 free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
811 }
812
/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 *
 * Packs consecutive IRQ vector numbers into the hardware vector table,
 * four vectors per 32-bit register (XDMA_IRQ_VEC_SHIFT bits each).
 *
 * Return: 0 on success, negative regmap error code otherwise.
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			/* partially-filled word is still written below */
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}
847
/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 *
 * Requests one IRQ per channel (H2C first, then C2H), programs the
 * hardware vector tables for channel and user interrupts, and enables
 * channel interrupts. On failure, unwinds via the two goto labels: the
 * loop counters i/j double as "how many IRQs to free".
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	/* free the j C2H IRQs requested so far, then fall through to H2C */
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}
926
927 static bool xdma_filter_fn(struct dma_chan *chan, void *param)
928 {
929         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
930         struct xdma_chan_info *chan_info = param;
931
932         return chan_info->dir == xdma_chan->dir;
933 }
934
935 /**
936  * xdma_disable_user_irq - Disable user interrupt
937  * @pdev: Pointer to the platform_device structure
938  * @irq_num: System IRQ number
939  */
940 void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
941 {
942         struct xdma_device *xdev = platform_get_drvdata(pdev);
943         u32 index;
944
945         index = irq_num - xdev->irq_start;
946         if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
947                 xdma_err(xdev, "invalid user irq number");
948                 return;
949         }
950         index -= XDMA_CHAN_NUM(xdev);
951
952         regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
953 }
954 EXPORT_SYMBOL(xdma_disable_user_irq);
955
956 /**
957  * xdma_enable_user_irq - Enable user logic interrupt
958  * @pdev: Pointer to the platform_device structure
959  * @irq_num: System IRQ number
960  */
961 int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
962 {
963         struct xdma_device *xdev = platform_get_drvdata(pdev);
964         u32 index;
965         int ret;
966
967         index = irq_num - xdev->irq_start;
968         if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
969                 xdma_err(xdev, "invalid user irq number");
970                 return -EINVAL;
971         }
972         index -= XDMA_CHAN_NUM(xdev);
973
974         ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
975         if (ret)
976                 return ret;
977
978         return 0;
979 }
980 EXPORT_SYMBOL(xdma_enable_user_irq);
981
982 /**
983  * xdma_get_user_irq - Get system IRQ number
984  * @pdev: Pointer to the platform_device structure
985  * @user_irq_index: User logic IRQ wire index
986  *
987  * Return: The system IRQ number allocated for the given wire index.
988  */
989 int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
990 {
991         struct xdma_device *xdev = platform_get_drvdata(pdev);
992
993         if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
994                 xdma_err(xdev, "invalid user irq index");
995                 return -EINVAL;
996         }
997
998         return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
999 }
1000 EXPORT_SYMBOL(xdma_get_user_irq);
1001
1002 /**
1003  * xdma_remove - Driver remove function
1004  * @pdev: Pointer to the platform_device structure
1005  */
1006 static void xdma_remove(struct platform_device *pdev)
1007 {
1008         struct xdma_device *xdev = platform_get_drvdata(pdev);
1009
1010         if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
1011                 xdma_irq_fini(xdev);
1012
1013         if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
1014                 dma_async_device_unregister(&xdev->dma_dev);
1015 }
1016
1017 /**
1018  * xdma_probe - Driver probe function
1019  * @pdev: Pointer to the platform_device structure
1020  */
1021 static int xdma_probe(struct platform_device *pdev)
1022 {
1023         struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
1024         struct xdma_device *xdev;
1025         void __iomem *reg_base;
1026         struct resource *res;
1027         int ret = -ENODEV;
1028
1029         if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
1030                 dev_err(&pdev->dev, "invalid max dma channels %d",
1031                         pdata->max_dma_channels);
1032                 return -EINVAL;
1033         }
1034
1035         xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1036         if (!xdev)
1037                 return -ENOMEM;
1038
1039         platform_set_drvdata(pdev, xdev);
1040         xdev->pdev = pdev;
1041
1042         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1043         if (!res) {
1044                 xdma_err(xdev, "failed to get irq resource");
1045                 goto failed;
1046         }
1047         xdev->irq_start = res->start;
1048         xdev->irq_num = resource_size(res);
1049
1050         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1051         if (!res) {
1052                 xdma_err(xdev, "failed to get io resource");
1053                 goto failed;
1054         }
1055
1056         reg_base = devm_ioremap_resource(&pdev->dev, res);
1057         if (IS_ERR(reg_base)) {
1058                 xdma_err(xdev, "ioremap failed");
1059                 goto failed;
1060         }
1061
1062         xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
1063                                            &xdma_regmap_config);
1064         if (!xdev->rmap) {
1065                 xdma_err(xdev, "config regmap failed: %d", ret);
1066                 goto failed;
1067         }
1068         INIT_LIST_HEAD(&xdev->dma_dev.channels);
1069
1070         ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
1071         if (ret) {
1072                 xdma_err(xdev, "config H2C channels failed: %d", ret);
1073                 goto failed;
1074         }
1075
1076         ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
1077         if (ret) {
1078                 xdma_err(xdev, "config C2H channels failed: %d", ret);
1079                 goto failed;
1080         }
1081
1082         dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
1083         dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
1084         dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
1085
1086         xdev->dma_dev.dev = &pdev->dev;
1087         xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1088         xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
1089         xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
1090         xdev->dma_dev.device_tx_status = xdma_tx_status;
1091         xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
1092         xdev->dma_dev.device_config = xdma_device_config;
1093         xdev->dma_dev.device_issue_pending = xdma_issue_pending;
1094         xdev->dma_dev.filter.map = pdata->device_map;
1095         xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
1096         xdev->dma_dev.filter.fn = xdma_filter_fn;
1097         xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
1098
1099         ret = dma_async_device_register(&xdev->dma_dev);
1100         if (ret) {
1101                 xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
1102                 goto failed;
1103         }
1104         xdev->status |= XDMA_DEV_STATUS_REG_DMA;
1105
1106         ret = xdma_irq_init(xdev);
1107         if (ret) {
1108                 xdma_err(xdev, "failed to init msix: %d", ret);
1109                 goto failed;
1110         }
1111         xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
1112
1113         return 0;
1114
1115 failed:
1116         xdma_remove(pdev);
1117
1118         return ret;
1119 }
1120
/* platform device IDs this driver binds to (created by the PCI parent) */
static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};

static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove_new	= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");