drivers/mmc/host/renesas_sdhi_sys_dmac.c (GNU Linux-libre 4.14.332-gnu1)
/*
 * DMA support for the SDHI SD/SDIO controller using SYS DMAC
 *
 * Copyright (C) 2016-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

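/*
 * Minimum transfer length (in bytes) handled via DMA; shorter requests
 * are forced to PIO in the start_dma_rx/tx paths below.
 */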
#define TMIO_MMC_MIN_DMA_LEN 8

static const struct renesas_sdhi_of_data of_default_cfg = {
        .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};

static const struct renesas_sdhi_of_data of_rz_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT |
                          TMIO_MMC_HAVE_CBSY,
        .tmio_ocr_mask  = MMC_VDD_32_33,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};

static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
                          TMIO_MMC_CLK_ACTUAL,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
        {
                .clk_rate = 156000000,
                .tap = 0x00000703,
        },
        {
                .clk_rate = 0,
                .tap = 0x00000300,
        },
};

static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
                          TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY |
                          TMIO_MMC_MIN_RCAR2,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_CMD23,
        .dma_buswidth   = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dma_rx_offset  = 0x2000,
        .scc_offset     = 0x0300,
        .taps           = rcar_gen2_scc_taps,
        .taps_num       = ARRAY_SIZE(rcar_gen2_scc_taps),
        .max_blk_count  = 0xffffffff,
};

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
        {
                .clk_rate = 0,
                .tap = 0x00000300,
        },
};

static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
                          TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY |
                          TMIO_MMC_MIN_RCAR2,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_CMD23,
        .bus_shift      = 2,
        .scc_offset     = 0x1000,
        .taps           = rcar_gen3_scc_taps,
        .taps_num       = ARRAY_SIZE(rcar_gen3_scc_taps),
};

static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
        { .compatible = "renesas,sdhi-shmobile" },
        { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
        { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
        { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
        { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
        { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
        { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
        { .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
        { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
        { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
        {},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);

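/*
 * Forward the enable/disable request to the platform's dma->enable hook,
 * but only when both a TX and an RX channel have been acquired.
 */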
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
                                             bool enable)
{
        if (!host->chan_tx || !host->chan_rx)
                return;

        if (host->dma->enable)
                host->dma->enable(host, enable);
}

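/* Terminate any in-flight transfer on both channels, then re-enable DMA. */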
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
{
        renesas_sdhi_sys_dmac_enable_dma(host, false);

        if (host->chan_rx)
                dmaengine_terminate_all(host->chan_rx);
        if (host->chan_tx)
                dmaengine_terminate_all(host->chan_tx);

        renesas_sdhi_sys_dmac_enable_dma(host, true);
}

static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
        complete(&host->dma_dataend);
}

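/*
 * dmaengine completion callback: unmap the scatterlist, wait for the
 * controller-side DATAEND notification (signalled via dataend_dma above),
 * then let the TMIO core finish the request via tmio_mmc_do_data_irq().
 */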
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
        struct tmio_mmc_host *host = arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        spin_unlock_irq(&host->lock);

        wait_for_completion(&host->dma_dataend);

        spin_lock_irq(&host->lock);
        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}

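/*
 * Prepare and submit a device-to-memory (read) transfer for the current
 * request. A single unaligned sg entry is redirected through the bounce
 * buffer; requests that violate the alignment rules, or are shorter than
 * TMIO_MMC_MIN_DMA_LEN, fall back to PIO.
 */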
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
                                               DMA_CTRL_ACK);

        if (desc) {
                reinit_completion(&host->dma_dataend);
                desc->callback = renesas_sdhi_sys_dmac_dma_callback;
                desc->callback_param = host;

                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                renesas_sdhi_sys_dmac_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
}

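/*
 * Memory-to-device (write) counterpart of the RX path above: when the single
 * sg entry is unaligned, the data is first copied into the bounce buffer.
 */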
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
                                               DMA_CTRL_ACK);

        if (desc) {
                reinit_completion(&host->dma_dataend);
                desc->callback = renesas_sdhi_sys_dmac_dma_callback;
                desc->callback_param = host;

                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                renesas_sdhi_sys_dmac_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
}

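/* Dispatch to the RX or TX path depending on the data direction. */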
static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
                                            struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        renesas_sdhi_sys_dmac_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        renesas_sdhi_sys_dmac_start_dma_tx(host);
        }
}

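/*
 * Tasklet scheduled by the TMIO core: pick the channel matching the data
 * direction, enable the DATAEND interrupt and kick the pending descriptor.
 */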
static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}

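/*
 * Acquire and configure the TX and RX slave channels (via DT or the filter
 * function from platform data), point them at the SD data port, and set up
 * the bounce buffer, completion and issue tasklet. Any failure leaves the
 * channels unset so the core falls back to PIO.
 */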
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
                                              struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!host->dma || (!host->pdev->dev.of_node &&
                           (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
                return;

        if (!host->chan_tx && !host->chan_rx) {
                struct resource *res = platform_get_resource(host->pdev,
                                                             IORESOURCE_MEM, 0);
                struct dma_slave_config cfg = {};
                dma_cap_mask_t mask;
                int ret;

                if (!res)
                        return;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_slave_channel_compat(mask,
                                        host->dma->filter, pdata->chan_priv_tx,
                                        &host->pdev->dev, "tx");
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.dst_addr = res->start +
                        (CTL_SD_DATA_PORT << host->bus_shift);
                cfg.dst_addr_width = host->dma->dma_buswidth;
                if (!cfg.dst_addr_width)
                        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                cfg.src_addr = 0;
                ret = dmaengine_slave_config(host->chan_tx, &cfg);
                if (ret < 0)
                        goto ecfgtx;

                host->chan_rx = dma_request_slave_channel_compat(mask,
                                        host->dma->filter, pdata->chan_priv_rx,
                                        &host->pdev->dev, "rx");
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ereqrx;

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
                cfg.src_addr_width = host->dma->dma_buswidth;
                if (!cfg.src_addr_width)
                        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                cfg.dst_addr = 0;
                ret = dmaengine_slave_config(host->chan_rx, &cfg);
                if (ret < 0)
                        goto ecfgrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                init_completion(&host->dma_dataend);
                tasklet_init(&host->dma_issue,
                             renesas_sdhi_sys_dmac_issue_tasklet_fn,
                             (unsigned long)host);
        }

        renesas_sdhi_sys_dmac_enable_dma(host, true);

        return;

ebouncebuf:
ecfgrx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
ecfgtx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}

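/* Release both DMA channels and free the bounce buffer page, if present. */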
static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;

                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;

                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}

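/* DMA callbacks handed to the shared SDHI/TMIO core by the probe below. */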
static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
        .start = renesas_sdhi_sys_dmac_start_dma,
        .enable = renesas_sdhi_sys_dmac_enable_dma,
        .request = renesas_sdhi_sys_dmac_request_dma,
        .release = renesas_sdhi_sys_dmac_release_dma,
        .abort = renesas_sdhi_sys_dmac_abort_dma,
        .dataend = renesas_sdhi_sys_dmac_dataend_dma,
};

/*
 * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
 * implementation. Currently empty as all supported ES versions use
 * the internal DMAC.
 */
static const struct soc_device_attribute gen3_soc_whitelist[] = {
        { /* sentinel */ }
};

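/*
 * On R-Car Gen3 this driver only binds for SoC revisions in the (currently
 * empty) whitelist above; everything else is left to the internal DMAC
 * variant. Otherwise probing is delegated to the common SDHI code.
 */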
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
        if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
            !soc_device_match(gen3_soc_whitelist))
                return -ENODEV;

        return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
}

static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
                           tmio_mmc_host_runtime_resume,
                           NULL)
};

static struct platform_driver renesas_sys_dmac_sdhi_driver = {
        .driver         = {
                .name   = "sh_mobile_sdhi",
                .pm     = &renesas_sdhi_sys_dmac_dev_pm_ops,
                .of_match_table = renesas_sdhi_sys_dmac_of_match,
        },
        .probe          = renesas_sdhi_sys_dmac_probe,
        .remove         = renesas_sdhi_remove,
};

module_platform_driver(renesas_sys_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");