/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"
29 #define DRV_NAME "dw_dmac"
31 static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
34 struct dw_dma *dw = ofdma->of_dma_data;
35 struct dw_dma_slave slave = {
36 .dma_dev = dw->dma.dev,
40 if (dma_spec->args_count != 3)
43 slave.src_id = dma_spec->args[0];
44 slave.dst_id = dma_spec->args[0];
45 slave.m_master = dma_spec->args[1];
46 slave.p_master = dma_spec->args[2];
48 if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
49 slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
50 slave.m_master >= dw->pdata->nr_masters ||
51 slave.p_master >= dw->pdata->nr_masters))
55 dma_cap_set(DMA_SLAVE, cap);
57 /* TODO: there should be a simpler way to do this */
58 return dma_request_channel(cap, dw_dma_filter, &slave);
62 static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
64 struct acpi_dma_spec *dma_spec = param;
65 struct dw_dma_slave slave = {
66 .dma_dev = dma_spec->dev,
67 .src_id = dma_spec->slave_id,
68 .dst_id = dma_spec->slave_id,
73 return dw_dma_filter(chan, &slave);
76 static void dw_dma_acpi_controller_register(struct dw_dma *dw)
78 struct device *dev = dw->dma.dev;
79 struct acpi_dma_filter_info *info;
82 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
86 dma_cap_zero(info->dma_cap);
87 dma_cap_set(DMA_SLAVE, info->dma_cap);
88 info->filter_fn = dw_dma_acpi_filter;
90 ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
92 dev_err(dev, "could not register acpi_dma_controller\n");
95 static void dw_dma_acpi_controller_free(struct dw_dma *dw)
97 struct device *dev = dw->dma.dev;
99 acpi_dma_controller_free(dev);
#else /* !CONFIG_ACPI */
/* No-op stubs so callers need not wrap ACPI registration in #ifdef. */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
107 static struct dw_dma_platform_data *
108 dw_dma_parse_dt(struct platform_device *pdev)
110 struct device_node *np = pdev->dev.of_node;
111 struct dw_dma_platform_data *pdata;
112 u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
117 dev_err(&pdev->dev, "Missing DT data\n");
121 if (of_property_read_u32(np, "dma-masters", &nr_masters))
123 if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
126 if (of_property_read_u32(np, "dma-channels", &nr_channels))
128 if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
131 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
135 pdata->nr_masters = nr_masters;
136 pdata->nr_channels = nr_channels;
138 if (of_property_read_bool(np, "is_private"))
139 pdata->is_private = true;
142 * All known devices, which use DT for configuration, support
143 * memory-to-memory transfers. So enable it by default.
145 pdata->is_memcpy = true;
147 if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
148 pdata->chan_allocation_order = (unsigned char)tmp;
150 if (!of_property_read_u32(np, "chan_priority", &tmp))
151 pdata->chan_priority = tmp;
153 if (!of_property_read_u32(np, "block_size", &tmp))
154 pdata->block_size = tmp;
156 if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
157 for (tmp = 0; tmp < nr_masters; tmp++)
158 pdata->data_width[tmp] = arr[tmp];
159 } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
160 for (tmp = 0; tmp < nr_masters; tmp++)
161 pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
164 if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
165 for (tmp = 0; tmp < nr_channels; tmp++)
166 pdata->multi_block[tmp] = mb[tmp];
168 for (tmp = 0; tmp < nr_channels; tmp++)
169 pdata->multi_block[tmp] = 1;
172 if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
173 if (tmp > CHAN_PROTCTL_MASK)
175 pdata->protctl = tmp;
181 static inline struct dw_dma_platform_data *
182 dw_dma_parse_dt(struct platform_device *pdev)
188 static int dw_probe(struct platform_device *pdev)
190 struct dw_dma_chip *chip;
191 struct device *dev = &pdev->dev;
192 struct resource *mem;
193 const struct dw_dma_platform_data *pdata;
196 chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
200 chip->irq = platform_get_irq(pdev, 0);
204 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
205 chip->regs = devm_ioremap_resource(dev, mem);
206 if (IS_ERR(chip->regs))
207 return PTR_ERR(chip->regs);
209 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
213 pdata = dev_get_platdata(dev);
215 pdata = dw_dma_parse_dt(pdev);
221 chip->clk = devm_clk_get(chip->dev, "hclk");
222 if (IS_ERR(chip->clk))
223 return PTR_ERR(chip->clk);
224 err = clk_prepare_enable(chip->clk);
228 pm_runtime_enable(&pdev->dev);
230 err = dw_dma_probe(chip);
232 goto err_dw_dma_probe;
234 platform_set_drvdata(pdev, chip);
236 if (pdev->dev.of_node) {
237 err = of_dma_controller_register(pdev->dev.of_node,
238 dw_dma_of_xlate, chip->dw);
241 "could not register of_dma_controller\n");
244 if (ACPI_HANDLE(&pdev->dev))
245 dw_dma_acpi_controller_register(chip->dw);
250 pm_runtime_disable(&pdev->dev);
251 clk_disable_unprepare(chip->clk);
255 static int dw_remove(struct platform_device *pdev)
257 struct dw_dma_chip *chip = platform_get_drvdata(pdev);
259 if (ACPI_HANDLE(&pdev->dev))
260 dw_dma_acpi_controller_free(chip->dw);
262 if (pdev->dev.of_node)
263 of_dma_controller_free(pdev->dev.of_node);
266 pm_runtime_disable(&pdev->dev);
267 clk_disable_unprepare(chip->clk);
272 static void dw_shutdown(struct platform_device *pdev)
274 struct dw_dma_chip *chip = platform_get_drvdata(pdev);
277 * We have to call dw_dma_disable() to stop any ongoing transfer. On
278 * some platforms we can't do that since DMA device is powered off.
279 * Moreover we have no possibility to check if the platform is affected
280 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
281 * unconditionally. On the other hand we can't use
282 * pm_runtime_suspended() because runtime PM framework is not fully
283 * used by the driver.
285 pm_runtime_get_sync(chip->dev);
286 dw_dma_disable(chip);
287 pm_runtime_put_sync_suspend(chip->dev);
289 clk_disable_unprepare(chip->clk);
293 static const struct of_device_id dw_dma_of_id_table[] = {
294 { .compatible = "snps,dma-spear1340" },
297 MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
301 static const struct acpi_device_id dw_dma_acpi_id_table[] = {
305 MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
308 #ifdef CONFIG_PM_SLEEP
310 static int dw_suspend_late(struct device *dev)
312 struct dw_dma_chip *chip = dev_get_drvdata(dev);
314 dw_dma_disable(chip);
315 clk_disable_unprepare(chip->clk);
320 static int dw_resume_early(struct device *dev)
322 struct dw_dma_chip *chip = dev_get_drvdata(dev);
325 ret = clk_prepare_enable(chip->clk);
329 return dw_dma_enable(chip);
332 #endif /* CONFIG_PM_SLEEP */
334 static const struct dev_pm_ops dw_dev_pm_ops = {
335 SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
338 static struct platform_driver dw_driver = {
341 .shutdown = dw_shutdown,
344 .pm = &dw_dev_pm_ops,
345 .of_match_table = of_match_ptr(dw_dma_of_id_table),
346 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
350 static int __init dw_init(void)
352 return platform_driver_register(&dw_driver);
354 subsys_initcall(dw_init);
356 static void __exit dw_exit(void)
358 platform_driver_unregister(&dw_driver);
360 module_exit(dw_exit);
362 MODULE_LICENSE("GPL v2");
363 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
364 MODULE_ALIAS("platform:" DRV_NAME);