// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA PCIe driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma/edma.h>
#include <linux/pci-epf.h>
#include <linux/msi.h>
#include <linux/bitfield.h>

#include "dw-edma-core.h"
20 #define DW_PCIE_VSEC_DMA_ID 0x6
21 #define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
22 #define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
23 #define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
24 #define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
26 #define DW_BLOCK(a, b, c) \
33 struct dw_edma_block {
39 struct dw_edma_pcie_data {
40 /* eDMA registers location */
41 struct dw_edma_block rg;
42 /* eDMA memory linked list location */
43 struct dw_edma_block ll_wr[EDMA_MAX_WR_CH];
44 struct dw_edma_block ll_rd[EDMA_MAX_RD_CH];
45 /* eDMA memory data location */
46 struct dw_edma_block dt_wr[EDMA_MAX_WR_CH];
47 struct dw_edma_block dt_rd[EDMA_MAX_RD_CH];
49 enum dw_edma_map_format mf;
55 static const struct dw_edma_pcie_data snps_edda_data = {
56 /* eDMA registers location */
58 .rg.off = 0x00001000, /* 4 Kbytes */
59 .rg.sz = 0x00002000, /* 8 Kbytes */
60 /* eDMA memory linked list location */
62 /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */
63 DW_BLOCK(BAR_2, 0x00000000, 0x00000800)
64 /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */
65 DW_BLOCK(BAR_2, 0x00200000, 0x00000800)
68 /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */
69 DW_BLOCK(BAR_2, 0x00400000, 0x00000800)
70 /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */
71 DW_BLOCK(BAR_2, 0x00600000, 0x00000800)
73 /* eDMA memory data location */
75 /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */
76 DW_BLOCK(BAR_2, 0x00800000, 0x00000800)
77 /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */
78 DW_BLOCK(BAR_2, 0x00900000, 0x00000800)
81 /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */
82 DW_BLOCK(BAR_2, 0x00a00000, 0x00000800)
83 /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */
84 DW_BLOCK(BAR_2, 0x00b00000, 0x00000800)
87 .mf = EDMA_MF_EDMA_UNROLL,
93 static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
95 return pci_irq_vector(to_pci_dev(dev), nr);
98 static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
99 .irq_vector = dw_edma_pcie_irq_vector,
102 static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
103 struct dw_edma_pcie_data *pdata)
109 vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
110 DW_PCIE_VSEC_DMA_ID);
114 pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
115 if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
116 PCI_VNDR_HEADER_LEN(val) != 0x18)
119 pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
120 pci_read_config_dword(pdev, vsec + 0x8, &val);
121 map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
122 if (map != EDMA_MF_EDMA_LEGACY &&
123 map != EDMA_MF_EDMA_UNROLL &&
124 map != EDMA_MF_HDMA_COMPAT)
128 pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);
130 pci_read_config_dword(pdev, vsec + 0xc, &val);
131 pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
132 FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
133 pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
134 FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));
136 pci_read_config_dword(pdev, vsec + 0x14, &val);
138 pci_read_config_dword(pdev, vsec + 0x10, &val);
144 static int dw_edma_pcie_probe(struct pci_dev *pdev,
145 const struct pci_device_id *pid)
147 struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
148 struct dw_edma_pcie_data vsec_data;
149 struct device *dev = &pdev->dev;
150 struct dw_edma_chip *chip;
155 /* Enable PCI device */
156 err = pcim_enable_device(pdev);
158 pci_err(pdev, "enabling device failed\n");
162 memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
165 * Tries to find if exists a PCIe Vendor-Specific Extended Capability
166 * for the DMA, if one exists, then reconfigures it.
168 dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
170 /* Mapping PCI BAR regions */
171 mask = BIT(vsec_data.rg.bar);
172 for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
173 mask |= BIT(vsec_data.ll_wr[i].bar);
174 mask |= BIT(vsec_data.dt_wr[i].bar);
176 for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
177 mask |= BIT(vsec_data.ll_rd[i].bar);
178 mask |= BIT(vsec_data.dt_rd[i].bar);
180 err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
182 pci_err(pdev, "eDMA BAR I/O remapping failed\n");
186 pci_set_master(pdev);
188 /* DMA configuration */
189 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
191 pci_err(pdev, "DMA mask 64 set failed\n");
195 /* Data structure allocation */
196 chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
200 dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
204 /* IRQs allocation */
205 nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
206 PCI_IRQ_MSI | PCI_IRQ_MSIX);
208 pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
213 /* Data structure initialization */
216 chip->id = pdev->devfn;
217 chip->irq = pdev->irq;
219 dw->mf = vsec_data.mf;
220 dw->nr_irqs = nr_irqs;
221 dw->ops = &dw_edma_pcie_core_ops;
222 dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
223 dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
225 dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
226 if (!dw->rg_region.vaddr)
229 dw->rg_region.vaddr += vsec_data.rg.off;
230 dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
231 dw->rg_region.paddr += vsec_data.rg.off;
232 dw->rg_region.sz = vsec_data.rg.sz;
234 for (i = 0; i < dw->wr_ch_cnt; i++) {
235 struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
236 struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
237 struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
238 struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
240 ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
241 if (!ll_region->vaddr)
244 ll_region->vaddr += ll_block->off;
245 ll_region->paddr = pdev->resource[ll_block->bar].start;
246 ll_region->paddr += ll_block->off;
247 ll_region->sz = ll_block->sz;
249 dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
250 if (!dt_region->vaddr)
253 dt_region->vaddr += dt_block->off;
254 dt_region->paddr = pdev->resource[dt_block->bar].start;
255 dt_region->paddr += dt_block->off;
256 dt_region->sz = dt_block->sz;
259 for (i = 0; i < dw->rd_ch_cnt; i++) {
260 struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
261 struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
262 struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
263 struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
265 ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
266 if (!ll_region->vaddr)
269 ll_region->vaddr += ll_block->off;
270 ll_region->paddr = pdev->resource[ll_block->bar].start;
271 ll_region->paddr += ll_block->off;
272 ll_region->sz = ll_block->sz;
274 dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
275 if (!dt_region->vaddr)
278 dt_region->vaddr += dt_block->off;
279 dt_region->paddr = pdev->resource[dt_block->bar].start;
280 dt_region->paddr += dt_block->off;
281 dt_region->sz = dt_block->sz;
285 if (dw->mf == EDMA_MF_EDMA_LEGACY)
286 pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
287 else if (dw->mf == EDMA_MF_EDMA_UNROLL)
288 pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
289 else if (dw->mf == EDMA_MF_HDMA_COMPAT)
290 pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
292 pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
294 pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
295 vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
296 dw->rg_region.vaddr, &dw->rg_region.paddr);
299 for (i = 0; i < dw->wr_ch_cnt; i++) {
300 pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
301 i, vsec_data.ll_wr[i].bar,
302 vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
303 dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
305 pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
306 i, vsec_data.dt_wr[i].bar,
307 vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
308 dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
311 for (i = 0; i < dw->rd_ch_cnt; i++) {
312 pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
313 i, vsec_data.ll_rd[i].bar,
314 vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
315 dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
317 pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
318 i, vsec_data.dt_rd[i].bar,
319 vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
320 dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
323 pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
325 /* Validating if PCI interrupts were enabled */
326 if (!pci_dev_msi_enabled(pdev)) {
327 pci_err(pdev, "enable interrupt failed\n");
331 dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
335 /* Starting eDMA driver */
336 err = dw_edma_probe(chip);
338 pci_err(pdev, "eDMA probe failed\n");
342 /* Saving data structure reference */
343 pci_set_drvdata(pdev, chip);
348 static void dw_edma_pcie_remove(struct pci_dev *pdev)
350 struct dw_edma_chip *chip = pci_get_drvdata(pdev);
353 /* Stopping eDMA driver */
354 err = dw_edma_remove(chip);
356 pci_warn(pdev, "can't remove device properly: %d\n", err);
359 pci_free_irq_vectors(pdev);
362 static const struct pci_device_id dw_edma_pcie_id_table[] = {
363 { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
366 MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);
368 static struct pci_driver dw_edma_pcie_driver = {
369 .name = "dw-edma-pcie",
370 .id_table = dw_edma_pcie_id_table,
371 .probe = dw_edma_pcie_probe,
372 .remove = dw_edma_pcie_remove,
375 module_pci_driver(dw_edma_pcie_driver);
377 MODULE_LICENSE("GPL v2");
378 MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver");
379 MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");