GNU Linux-libre 6.9.1-gnu
drivers/mmc/host/mmci.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static void mmci_variant_init(struct mmci_host *host);
static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);

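/*
 * Default cap on the card clock frequency, in Hz (editorial note: this
 * value bounds the frequency advertised to the MMC core later in the
 * file; the derivation of the specific constant is not documented here).
 */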
static unsigned int fmax = 515633;

static struct variant_data variant_arm = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 16,
        .datactrl_blocksz       = 11,
        .pwrreg_powerup         = MCI_PWR_UP,
        .f_max                  = 100000000,
        .reversed_irq_handling  = true,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_ROD,
        .init                   = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
        .fifosize               = 128 * 4,
        .fifohalfsize           = 64 * 4,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 16,
        .datactrl_blocksz       = 11,
        .pwrreg_powerup         = MCI_PWR_UP,
        .f_max                  = 100000000,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_ROD,
        .init                   = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
        .fifosize               = 128 * 4,
        .fifohalfsize           = 64 * 4,
        .clkreg_enable          = MCI_ARM_HWFCEN,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 16,
        .datactrl_blocksz       = 11,
        .pwrreg_powerup         = MCI_PWR_UP,
        .f_max                  = 100000000,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_ROD,
        .init                   = mmci_variant_init,
};

static struct variant_data variant_u300 = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg_enable          = MCI_ST_U300_HWFCEN,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 16,
        .datactrl_blocksz       = 11,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .f_max                  = 100000000,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_OD,
        .init                   = mmci_variant_init,
};

static struct variant_data variant_nomadik = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .f_max                  = 100000000,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_OD,
        .init                   = mmci_variant_init,
};

static struct variant_data variant_ux500 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
        .datactrl_any_blocksz   = true,
        .dma_power_of_2         = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .f_max                  = 100000000,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
        .busy_dpsm_flag         = MCI_DPSM_ST_BUSYMODE,
        .busy_detect_flag       = MCI_ST_CARDBUSY,
        .busy_detect_mask       = MCI_ST_BUSYENDMASK,
        .pwrreg_nopower         = true,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_OD,
        .init                   = ux500_variant_init,
};

static struct variant_data variant_ux500v2 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
        .datactrl_any_blocksz   = true,
        .dma_power_of_2         = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .f_max                  = 100000000,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
        .busy_dpsm_flag         = MCI_DPSM_ST_BUSYMODE,
        .busy_detect_flag       = MCI_ST_CARDBUSY,
        .busy_detect_mask       = MCI_ST_BUSYENDMASK,
        .pwrreg_nopower         = true,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_OD,
        .init                   = ux500v2_variant_init,
};

static struct variant_data variant_stm32 = {
        .fifosize               = 32 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .f_max                  = 48000000,
        .pwrreg_clkgate         = true,
        .pwrreg_nopower         = true,
        .dma_flow_controller    = true,
        .init                   = mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .f_max                  = 208000000,
        .stm32_clkdiv           = true,
        .cmdreg_cpsm_enable     = MCI_CPSM_STM32_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_STM32_LRSP_CRC,
        .cmdreg_srsp_crc        = MCI_CPSM_STM32_SRSP_CRC,
        .cmdreg_srsp            = MCI_CPSM_STM32_SRSP,
        .cmdreg_stop            = MCI_CPSM_STM32_CMDSTOP,
        .data_cmd_enable        = MCI_CPSM_STM32_CMDTRANS,
        .irq_pio_mask           = MCI_IRQ_PIO_STM32_MASK,
        .datactrl_first         = true,
        .datacnt_useless        = true,
        .datalength_bits        = 25,
        .datactrl_blocksz       = 14,
        .datactrl_any_blocksz   = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .stm32_idmabsize_mask   = GENMASK(12, 5),
        .stm32_idmabsize_align  = BIT(5),
        .supports_sdio_irq      = true,
        .busy_timeout           = true,
        .busy_detect            = true,
        .busy_detect_flag       = MCI_STM32_BUSYD0,
        .busy_detect_mask       = MCI_STM32_BUSYD0ENDMASK,
        .init                   = sdmmc_variant_init,
};

static struct variant_data variant_stm32_sdmmcv2 = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .f_max                  = 267000000,
        .stm32_clkdiv           = true,
        .cmdreg_cpsm_enable     = MCI_CPSM_STM32_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_STM32_LRSP_CRC,
        .cmdreg_srsp_crc        = MCI_CPSM_STM32_SRSP_CRC,
        .cmdreg_srsp            = MCI_CPSM_STM32_SRSP,
        .cmdreg_stop            = MCI_CPSM_STM32_CMDSTOP,
        .data_cmd_enable        = MCI_CPSM_STM32_CMDTRANS,
        .irq_pio_mask           = MCI_IRQ_PIO_STM32_MASK,
        .datactrl_first         = true,
        .datacnt_useless        = true,
        .datalength_bits        = 25,
        .datactrl_blocksz       = 14,
        .datactrl_any_blocksz   = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .stm32_idmabsize_mask   = GENMASK(16, 5),
        .stm32_idmabsize_align  = BIT(5),
        .supports_sdio_irq      = true,
        .dma_lli                = true,
        .busy_timeout           = true,
        .busy_detect            = true,
        .busy_detect_flag       = MCI_STM32_BUSYD0,
        .busy_detect_mask       = MCI_STM32_BUSYD0ENDMASK,
        .init                   = sdmmc_variant_init,
};

static struct variant_data variant_stm32_sdmmcv3 = {
        .fifosize               = 256 * 4,
        .fifohalfsize           = 128 * 4,
        .f_max                  = 267000000,
        .stm32_clkdiv           = true,
        .cmdreg_cpsm_enable     = MCI_CPSM_STM32_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_STM32_LRSP_CRC,
        .cmdreg_srsp_crc        = MCI_CPSM_STM32_SRSP_CRC,
        .cmdreg_srsp            = MCI_CPSM_STM32_SRSP,
        .cmdreg_stop            = MCI_CPSM_STM32_CMDSTOP,
        .data_cmd_enable        = MCI_CPSM_STM32_CMDTRANS,
        .irq_pio_mask           = MCI_IRQ_PIO_STM32_MASK,
        .datactrl_first         = true,
        .datacnt_useless        = true,
        .datalength_bits        = 25,
        .datactrl_blocksz       = 14,
        .datactrl_any_blocksz   = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .stm32_idmabsize_mask   = GENMASK(16, 6),
        .stm32_idmabsize_align  = BIT(6),
        .supports_sdio_irq      = true,
        .dma_lli                = true,
        .busy_timeout           = true,
        .busy_detect            = true,
        .busy_detect_flag       = MCI_STM32_BUSYD0,
        .busy_detect_mask       = MCI_STM32_BUSYD0ENDMASK,
        .init                   = sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_QCOM_CLK_FLOWENA |
                                  MCI_QCOM_CLK_SELECT_IN_FBCLK,
        .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
        .datactrl_mask_ddrmode  = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
        .cmdreg_cpsm_enable     = MCI_CPSM_ENABLE,
        .cmdreg_lrsp_crc        = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
        .cmdreg_srsp_crc        = MCI_CPSM_RESPONSE,
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .data_cmd_enable        = MCI_CPSM_QCOM_DATCMD,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
        .datactrl_any_blocksz   = true,
        .pwrreg_powerup         = MCI_PWR_UP,
        .f_max                  = 208000000,
        .explicit_mclk_control  = true,
        .qcom_fifo              = true,
        .qcom_dml               = true,
        .mmcimask1              = true,
        .irq_pio_mask           = MCI_IRQ_PIO_MASK,
        .start_err              = MCI_STARTBITERR,
        .opendrain              = MCI_ROD,
        .init                   = qcom_variant_init,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;
        int busy = 0;

        spin_lock_irqsave(&host->lock, flags);
        if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
                busy = 1;
        spin_unlock_irqrestore(&host->lock, flags);

        return busy;
}

static void mmci_reg_delay(struct mmci_host *host)
{
        /*
         * According to the spec, at least three feedback clock cycles
         * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
         * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
         * Worst delay time during card init is at 100 kHz => 30 us.
         * Worst delay time when up and running is at 25 MHz => 120 ns.
         */
        if (host->cclk < 25000000)
                udelay(30);
        else
                ndelay(120);
}

/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
        if (host->clk_reg != clk) {
                host->clk_reg = clk;
                writel(clk, host->base + MMCICLOCK);
        }
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
        if (host->pwr_reg != pwr) {
                host->pwr_reg = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
        /* Keep busy mode in DPSM and SDIO mask if enabled */
        datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag |
                                          host->variant->datactrl_mask_sdio);

        if (host->datactrl_reg != datactrl) {
                host->datactrl_reg = datactrl;
                writel(datactrl, host->base + MMCIDATACTRL);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
        struct variant_data *variant = host->variant;
        u32 clk = variant->clkreg;

        /* Make sure cclk reflects the current calculated clock */
        host->cclk = 0;

        if (desired) {
                if (variant->explicit_mclk_control) {
                        host->cclk = host->mclk;
                } else if (desired >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        if (variant->st_clkdiv)
                                clk |= MCI_ST_UX500_NEG_EDGE;
                        host->cclk = host->mclk;
                } else if (variant->st_clkdiv) {
                        /*
                         * DB8500 TRM says f = mclk / (clkdiv + 2)
                         * => clkdiv = (mclk / f) - 2
                         * Round the divider up so we don't exceed the max
                         * frequency
                         */
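                        /*
                         * Worked example (hypothetical numbers): with
                         * mclk = 100 MHz and desired = 400 kHz,
                         * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
                         * giving cclk = 100000000 / (248 + 2) = 400 kHz.
                         */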
                        clk = DIV_ROUND_UP(host->mclk, desired) - 2;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (clk + 2);
                } else {
                        /*
                         * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
                         * => clkdiv = mclk / (2 * f) - 1
                         */
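                        /*
                         * Worked example (hypothetical numbers): with
                         * mclk = 100 MHz and desired = 400 kHz,
                         * clkdiv = 100000000 / (2 * 400000) - 1 = 124,
                         * giving cclk = 100000000 / (2 * 125) = 400 kHz.
                         */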
                        clk = host->mclk / (2 * desired) - 1;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }

                clk |= variant->clkreg_enable;
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
                /* clk |= MCI_CLK_PWRSAVE; */
        }

        /* Set actual clock for debug */
        host->mmc->actual_clock = host->cclk;

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_4BIT_BUS;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= variant->clkreg_8bit_bus_enable;

        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
            host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
                clk |= variant->clkreg_neg_edge_enable;

        mmci_write_clkreg(host, clk);
}

static void mmci_dma_release(struct mmci_host *host)
{
        if (host->ops && host->ops->dma_release)
                host->ops->dma_release(host);

        host->use_dma = false;
}

static void mmci_dma_setup(struct mmci_host *host)
{
        if (!host->ops || !host->ops->dma_setup)
                return;

        if (host->ops->dma_setup(host))
                return;

        /* initialize pre request cookie */
        host->next_cookie = 1;

        host->use_dma = true;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
                              struct mmc_data *data)
{
        struct variant_data *variant = host->variant;

        if (!data)
                return 0;
        if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
                dev_err(mmc_dev(host->mmc),
                        "unsupported block size (%d bytes)\n", data->blksz);
                return -EINVAL;
        }

        if (host->ops && host->ops->validate_data)
                return host->ops->validate_data(host, data);

        return 0;
}

static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
        int err;

        if (!host->ops || !host->ops->prep_data)
                return 0;

        err = host->ops->prep_data(host, data, next);

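        /*
         * Hand out a non-zero cookie for a pre-prepared request; if the
         * signed counter overflows, wrap back to 1 so the cookie never
         * becomes 0 (0 means "not prepared").
         */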
        if (next && !err)
                data->host_cookie = ++host->next_cookie < 0 ?
                        1 : host->next_cookie;

        return err;
}

static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
                      int err)
{
        if (host->ops && host->ops->unprep_data)
                host->ops->unprep_data(host, data, err);

        data->host_cookie = 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
        WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

        if (host->ops && host->ops->get_next_data)
                host->ops->get_next_data(host, data);
}

static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
        struct mmc_data *data = host->data;
        int ret;

        if (!host->use_dma)
                return -EINVAL;

        ret = mmci_prep_data(host, data, false);
        if (ret)
                return ret;

        if (!host->ops || !host->ops->dma_start)
                return -EINVAL;

        /* Okay, go for it. */
        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);

        ret = host->ops->dma_start(host, &datactrl);
        if (ret)
                return ret;

        /* Trigger the DMA transfer */
        mmci_write_datactrlreg(host, datactrl);

        /*
         * Let the MMCI say when the data is ended and it's time
         * to fire the next DMA request. When that happens, MMCI will
         * call mmci_data_end()
         */
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
        if (!host->use_dma)
                return;

        if (host->ops && host->ops->dma_finalize)
                host->ops->dma_finalize(host, data);
}

static void mmci_dma_error(struct mmci_host *host)
{
        if (!host->use_dma)
                return;

        if (host->ops && host->ops->dma_error)
                host->ops->dma_error(host);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        mmc_request_done(host->mmc, mrq);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
        void __iomem *base = host->base;
        struct variant_data *variant = host->variant;

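        /*
         * With a single combined IRQ line, the PIO interrupt bits that
         * would normally be routed to MMCIMASK1 must be mirrored into
         * MMCIMASK0 as well.
         */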
        if (host->singleirq) {
                unsigned int mask0 = readl(base + MMCIMASK0);

                mask0 &= ~variant->irq_pio_mask;
                mask0 |= mask;

                writel(mask0, base + MMCIMASK0);
        }

        if (variant->mmcimask1)
                writel(mask, base + MMCIMASK1);

        host->mask1_reg = mask;
}

static void mmci_stop_data(struct mmci_host *host)
{
        mmci_write_datactrlreg(host, 0);
        mmci_set_mask1(host, 0);
        host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
        return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}

static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
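        /*
         * On this variant the block size is written as a raw byte count
         * into the upper half of the data control register rather than
         * as a power-of-two exponent.
         */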
        return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}

static void ux500_busy_clear_mask_done(struct mmci_host *host)
{
        void __iomem *base = host->base;

        writel(host->variant->busy_detect_mask, base + MMCICLEAR);
        writel(readl(base + MMCIMASK0) &
               ~host->variant->busy_detect_mask, base + MMCIMASK0);
        host->busy_state = MMCI_BUSY_DONE;
        host->busy_status = 0;
}

/*
 * ux500_busy_complete() - this will wait until the busy status
 * goes off, saving any status that occurs in the meantime into
 * host->busy_status until we know the card is not busy any more.
 * The function returns true when the busy detection is ended
 * and we should continue processing the command.
 *
 * The Ux500 typically fires two IRQs over a busy cycle like this:
 *
 *  DAT0 busy          +-----------------+
 *                     |                 |
 *  DAT0 not busy  ----+                 +--------
 *
 *                     ^                 ^
 *                     |                 |
 *                    IRQ1              IRQ2
 */
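/*
 * State transitions implemented by the switch below:
 *   MMCI_BUSY_DONE -> MMCI_BUSY_WAITING_FOR_START_IRQ  (busy seen, IRQ unmasked)
 *   MMCI_BUSY_WAITING_FOR_START_IRQ -> MMCI_BUSY_WAITING_FOR_END_IRQ  (IRQ1 cleared)
 *   MMCI_BUSY_WAITING_FOR_END_IRQ -> MMCI_BUSY_DONE  (IRQ2 cleared, mask dropped)
 * An error or a lost busy signal collapses the state back to MMCI_BUSY_DONE.
 */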
static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
                                u32 status, u32 err_msk)
{
        void __iomem *base = host->base;
        int retries = 10;

        if (status & err_msk) {
                /* Stop any ongoing busy detection if an error occurs */
                ux500_busy_clear_mask_done(host);
                goto out_ret_state;
        }

        /*
         * The state transitions are encoded in a state machine crossing
         * the edges in this switch statement.
         */
        switch (host->busy_state) {

        /*
         * Before unmasking for the busy end IRQ, confirm that the
         * command was sent successfully. To keep track of having a
         * command in-progress, waiting for busy signaling to end,
         * store the status in host->busy_status.
         *
         * Note that the card may need a couple of clock cycles before
         * it starts signaling busy on DAT0, hence re-read the
         * MMCISTATUS register here, to allow the busy bit to be set.
         */
        case MMCI_BUSY_DONE:
                /*
                 * Save the first status register read to be sure to catch
                 * all bits that may be lost while retrying. If the command
                 * is still busy this will result in assigning 0 to
                 * host->busy_status, which is what it should be in IDLE.
                 */
                host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
                while (retries) {
                        status = readl(base + MMCISTATUS);
                        /* Keep accumulating status bits */
                        host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
                        if (status & host->variant->busy_detect_flag) {
                                writel(readl(base + MMCIMASK0) |
                                       host->variant->busy_detect_mask,
                                       base + MMCIMASK0);
                                host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
                                schedule_delayed_work(&host->ux500_busy_timeout_work,
                                      msecs_to_jiffies(cmd->busy_timeout));
                                goto out_ret_state;
                        }
                        retries--;
                }
                dev_dbg(mmc_dev(host->mmc),
                        "no busy signalling in time CMD%02x\n", cmd->opcode);
                ux500_busy_clear_mask_done(host);
                break;

        /*
         * If there is a command in-progress that has been successfully
         * sent, then bail out if busy status is set and wait for the
         * busy end IRQ.
         *
         * Note that the HW triggers an IRQ on both edges while
         * monitoring DAT0 for busy completion, but there is only one
         * status bit in MMCISTATUS for the busy state. Therefore
         * both the start and the end interrupts need to be cleared,
         * one after the other. So, clear the busy start IRQ here.
         */
        case MMCI_BUSY_WAITING_FOR_START_IRQ:
                if (status & host->variant->busy_detect_flag) {
                        host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
                        writel(host->variant->busy_detect_mask, base + MMCICLEAR);
                        host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
                } else {
                        dev_dbg(mmc_dev(host->mmc),
                                "lost busy status when waiting for busy start IRQ CMD%02x\n",
                                cmd->opcode);
                        cancel_delayed_work(&host->ux500_busy_timeout_work);
                        ux500_busy_clear_mask_done(host);
                }
                break;

        case MMCI_BUSY_WAITING_FOR_END_IRQ:
                if (!(status & host->variant->busy_detect_flag)) {
                        host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
                        writel(host->variant->busy_detect_mask, base + MMCICLEAR);
                        cancel_delayed_work(&host->ux500_busy_timeout_work);
                        ux500_busy_clear_mask_done(host);
                } else {
                        dev_dbg(mmc_dev(host->mmc),
                                "busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
                                cmd->opcode);
                }
                break;

        default:
                dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
                        host->busy_state, cmd->opcode);
                break;
        }

out_ret_state:
        return (host->busy_state == MMCI_BUSY_DONE);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan;
};

struct mmci_dmae_priv {
        struct dma_chan *cur;
        struct dma_chan *rx_channel;
        struct dma_chan *tx_channel;
        struct dma_async_tx_descriptor  *desc_current;
        struct mmci_dmae_next next_data;
};

int mmci_dmae_setup(struct mmci_host *host)
{
        const char *rxname, *txname;
        struct mmci_dmae_priv *dmae;

        dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
        if (!dmae)
                return -ENOMEM;

        host->dma_priv = dmae;

        dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
        if (IS_ERR(dmae->rx_channel)) {
                int ret = PTR_ERR(dmae->rx_channel);
                dmae->rx_channel = NULL;
                return ret;
        }

        dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
        if (IS_ERR(dmae->tx_channel)) {
                if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
                        dev_warn(mmc_dev(host->mmc),
                                 "Deferred probe for TX channel ignored\n");
                dmae->tx_channel = NULL;
        }

        /*
         * If only an RX channel is specified, the driver will
         * attempt to use it bidirectionally; however, if it
         * is specified but cannot be located, DMA will be disabled.
         */
        if (dmae->rx_channel && !dmae->tx_channel)
                dmae->tx_channel = dmae->rx_channel;

        if (dmae->rx_channel)
                rxname = dma_chan_name(dmae->rx_channel);
        else
                rxname = "none";

        if (dmae->tx_channel)
                txname = dma_chan_name(dmae->tx_channel);
        else
                txname = "none";

        dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
                 rxname, txname);

        /*
         * Limit the maximum segment size in any SG entry according to
         * the parameters of the DMA engine device.
         */
        if (dmae->tx_channel) {
                struct device *dev = dmae->tx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
        if (dmae->rx_channel) {
                struct device *dev = dmae->rx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }

        if (!dmae->tx_channel || !dmae->rx_channel) {
                mmci_dmae_release(host);
                return -EINVAL;
        }

        return 0;
}

/*
 * Release the DMA channels, if any were acquired; this is also used
 * on the error path of mmci_dmae_setup() above.
 */
void mmci_dmae_release(struct mmci_host *host)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;

        if (dmae->rx_channel)
                dma_release_channel(dmae->rx_channel);
        if (dmae->tx_channel)
                dma_release_channel(dmae->tx_channel);
        dmae->rx_channel = dmae->tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;
        struct dma_chan *chan;

        if (data->flags & MMC_DATA_READ)
                chan = dmae->rx_channel;
        else
                chan = dmae->tx_channel;

        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
                     mmc_get_dma_dir(data));
}

void mmci_dmae_error(struct mmci_host *host)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;

        if (!dma_inprogress(host))
                return;

        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
        dmaengine_terminate_all(dmae->cur);
        host->dma_in_progress = false;
        dmae->cur = NULL;
        dmae->desc_current = NULL;
        host->data->host_cookie = 0;

        mmci_dma_unmap(host, host->data);
}

void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;
        u32 status;
        int i;

        if (!dma_inprogress(host))
                return;

        /* Wait up to 1ms for the DMA to complete */
        for (i = 0; ; i++) {
                status = readl(host->base + MMCISTATUS);
                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
                        break;
                udelay(10);
        }

        /*
         * Check to see whether we still have some data left in the FIFO -
         * this catches DMA controllers which are unable to monitor the
         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
         * contiguous buffers.  On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                mmci_dma_error(host);
                if (!data->error)
                        data->error = -EIO;
        } else if (!data->host_cookie) {
                mmci_dma_unmap(host, data);
        }

        /*
         * Use of DMA with scatter-gather is impossible.
         * Give up with DMA and switch back to PIO mode.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }

        host->dma_in_progress = false;
        dmae->cur = NULL;
        dmae->desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
                                struct dma_chan **dma_chan,
                                struct dma_async_tx_descriptor **dma_desc)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;
        struct variant_data *variant = host->variant;
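        /*
         * Both src_addr and dst_addr point at the FIFO; only the one
         * matching conf.direction is used. Bursts are sized to half the
         * FIFO, matching the controller's half-full/half-empty DMA
         * request thresholds.
         */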
        struct dma_slave_config conf = {
                .src_addr = host->phybase + MMCIFIFO,
                .dst_addr = host->phybase + MMCIFIFO,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .device_fc = variant->dma_flow_controller,
        };
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        int nr_sg;
        unsigned long flags = DMA_CTRL_ACK;

        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_DEV_TO_MEM;
                chan = dmae->rx_channel;
        } else {
                conf.direction = DMA_MEM_TO_DEV;
                chan = dmae->tx_channel;
        }

        /* If there's no DMA channel, fall back to PIO */
        if (!chan)
                return -EINVAL;

        /* If less than or equal to the fifo size, don't bother with DMA */
        if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;

        /*
         * This is necessary to get SDIO working on the Ux500. We do not yet
         * know if this is a bug in:
         * - The Ux500 DMA controller (DMA40)
         * - The MMCI DMA interface on the Ux500
         * Some power-of-two block sizes (such as 64 bytes) are sent regularly
         * during SDIO traffic and those work fine, so for these we enable DMA
         * transfers.
         */
        if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
                return -EINVAL;

        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
                           mmc_get_dma_dir(data));
        if (nr_sg == 0)
                return -EINVAL;

        if (host->variant->qcom_dml)
                flags |= DMA_PREP_INTERRUPT;

        dmaengine_slave_config(chan, &conf);
        desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
                                       conf.direction, flags);
        if (!desc)
                goto unmap_exit;

        *dma_chan = chan;
        *dma_desc = desc;

        return 0;

 unmap_exit:
        dma_unmap_sg(device->dev, data->sg, data->sg_len,
                     mmc_get_dma_dir(data));
        return -ENOMEM;
}

int mmci_dmae_prep_data(struct mmci_host *host,
                        struct mmc_data *data,
                        bool next)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;
        struct mmci_dmae_next *nd = &dmae->next_data;

        if (!host->use_dma)
                return -EINVAL;

        if (next)
                return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
        /* Check if the next job is already prepared. */
        if (dmae->cur && dmae->desc_current)
                return 0;

        /* No job was prepared, so do it now. */
        return _mmci_dmae_prep_data(host, data, &dmae->cur,
                                    &dmae->desc_current);
}

int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;
        int ret;

        host->dma_in_progress = true;
        ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
        if (ret < 0) {
                host->dma_in_progress = false;
                return ret;
        }
        dma_async_issue_pending(dmae->cur);

        *datactrl |= MCI_DPSM_DMAENABLE;

        return 0;
}

void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;
        struct mmci_dmae_next *next = &dmae->next_data;

        if (!host->use_dma)
                return;

        WARN_ON(!data->host_cookie && (next->desc || next->chan));

        dmae->desc_current = next->desc;
        dmae->cur = next->chan;
        next->desc = NULL;
        next->chan = NULL;
}

void mmci_dmae_unprep_data(struct mmci_host *host,
                           struct mmc_data *data, int err)
{
        struct mmci_dmae_priv *dmae = host->dma_priv;

        if (!host->use_dma)
                return;

        mmci_dma_unmap(host, data);

        if (err) {
                struct mmci_dmae_next *next = &dmae->next_data;
                struct dma_chan *chan;
                if (data->flags & MMC_DATA_READ)
                        chan = dmae->rx_channel;
                else
                        chan = dmae->tx_channel;
                dmaengine_terminate_all(chan);

                if (dmae->desc_current == next->desc)
                        dmae->desc_current = NULL;

                if (dmae->cur == next->chan) {
                        host->dma_in_progress = false;
                        dmae->cur = NULL;
                }

                next->desc = NULL;
                next->chan = NULL;
        }
}

static struct mmci_host_ops mmci_variant_ops = {
        .prep_data = mmci_dmae_prep_data,
        .unprep_data = mmci_dmae_unprep_data,
        .get_datactrl_cfg = mmci_get_dctrl_cfg,
        .get_next_data = mmci_dmae_get_next_data,
        .dma_setup = mmci_dmae_setup,
        .dma_release = mmci_dmae_release,
        .dma_start = mmci_dmae_start,
        .dma_finalize = mmci_dmae_finalize,
        .dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
        .get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif

static void mmci_variant_init(struct mmci_host *host)
{
        host->ops = &mmci_variant_ops;
}

static void ux500_variant_init(struct mmci_host *host)
{
        host->ops = &mmci_variant_ops;
        host->ops->busy_complete = ux500_busy_complete;
}

static void ux500v2_variant_init(struct mmci_host *host)
{
        host->ops = &mmci_variant_ops;
        host->ops->busy_complete = ux500_busy_complete;
        host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!data)
                return;

        WARN_ON(data->host_cookie);

        if (mmci_validate_data(host, data))
                return;

        mmci_prep_data(host, data, true);
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
                              int err)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!data || !data->host_cookie)
                return;

        mmci_unprep_data(host, data, err);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        struct variant_data *variant = host->variant;
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;

        dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
                data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz * data->blocks;
        data->bytes_xfered = 0;

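        /*
         * Convert the nanosecond part of the timeout into card clock
         * cycles, e.g. (hypothetical numbers) 100 ms at cclk = 26 MHz
         * gives 2600000 cycles; timeout_clks is then added on top.
         */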
        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, NSEC_PER_SEC);

        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        datactrl = host->ops->get_datactrl_cfg(host);
        datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

        if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
                u32 clk;

                datactrl |= variant->datactrl_mask_sdio;

                /*
                 * The ST Micro variant for SDIO small write transfers
                 * needs to have clock H/W flow control disabled,
                 * otherwise the transfer will not start. The threshold
                 * depends on the rate of MCLK.
                 */
                if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
                    (host->size < 8 ||
                     (host->size <= 8 && host->mclk > 50000000)))
                        clk = host->clk_reg & ~variant->clkreg_enable;
                else
                        clk = host->clk_reg | variant->clkreg_enable;

                mmci_write_clkreg(host, clk);
        }

        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
            host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
                datactrl |= variant->datactrl_mask_ddrmode;

        /*
         * Attempt to use DMA operation mode; if this
         * should fail, fall back to PIO mode
         */
        if (!mmci_dma_start(host, datactrl))
                return;

        /* IRQ mode, map the SG list for CPU reading/writing */
        mmci_init_sg(host, data);

        if (data->flags & MMC_DATA_READ) {
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than the fifo 'half-full' threshold to
                 * transfer, trigger a PIO interrupt as soon as any data
                 * is available.
                 */
                if (host->size < variant->fifohalfsize)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        mmci_write_datactrlreg(host, datactrl);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;
        bool busy_resp = cmd->flags & MMC_RSP_BUSY;
        unsigned long long clks;

        dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
            cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
                writel(0, base + MMCICOMMAND);
                mmci_reg_delay(host);
        }

        if (host->variant->cmdreg_stop &&
            cmd->opcode == MMC_STOP_TRANSMISSION)
                c |= host->variant->cmdreg_stop;

        c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= host->variant->cmdreg_lrsp_crc;
                else if (cmd->flags & MMC_RSP_CRC)
                        c |= host->variant->cmdreg_srsp_crc;
                else
                        c |= host->variant->cmdreg_srsp;
        }

        host->busy_status = 0;
        host->busy_state = MMCI_BUSY_DONE;

        /* Assign a default timeout if the core does not provide one */
        if (busy_resp && !cmd->busy_timeout)
                cmd->busy_timeout = 10 * MSEC_PER_SEC;

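        /*
         * Program the busy timeout, clamped to the host's maximum, into
         * the data timer in card clock cycles; busy_timeout is given in
         * ms, hence the division by MSEC_PER_SEC below.
         */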
        if (busy_resp && host->variant->busy_timeout) {
                if (cmd->busy_timeout > host->mmc->max_busy_timeout)
                        clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
                else
                        clks = (unsigned long long)cmd->busy_timeout * host->cclk;

                do_div(clks, MSEC_PER_SEC);
                writel_relaxed(clks, host->base + MMCIDATATIMER);
        }

        if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
                host->ops->pre_sig_volt_switch(host);

        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                c |= host->variant->data_cmd_enable;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}

static void mmci_stop_command(struct mmci_host *host)
{
        host->stop_abort.error = 0;
        mmci_start_command(host, &host->stop_abort, 0);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        unsigned int status_err;

        /* Make sure we have data to handle */
        if (!data)
                return;

        /* First check for errors */
        status_err = status & (host->variant->start_err |
                               MCI_DATACRCFAIL | MCI_DATATIMEOUT |
                               MCI_TXUNDERRUN | MCI_RXOVERRUN);

        if (status_err) {
                u32 remain, success;

                /* Terminate the DMA transfer */
                mmci_dma_error(host);

                /*
                 * Calculate how far we are into the transfer.  Note that
                 * the data counter gives the number of bytes transferred
                 * on the MMC bus, not on the host side.  On reads, this
                 * can be as much as a FIFO-worth of data ahead.  This
                 * matters for FIFO overruns only.
                 */
                if (!host->variant->datacnt_useless) {
                        remain = readl(host->base + MMCIDATACNT);
                        success = data->blksz * data->blocks - remain;
                } else {
                        success = 0;
                }

                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
                        status_err, success);
                if (status_err & MCI_DATACRCFAIL) {
                        /* Last block was not successful */
                        success -= 1;
                        data->error = -EILSEQ;
                } else if (status_err & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
                } else if (status_err & MCI_STARTBITERR) {
                        data->error = -ECOMM;
                } else if (status_err & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status_err & MCI_RXOVERRUN) {
                        if (success > host->variant->fifosize)
                                success -= host->variant->fifosize;
                        else
                                success = 0;
                        data->error = -EIO;
                }
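                /* Report back only the blocks that completed in full. */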
                data->bytes_xfered = round_down(success, data->blksz);
        }

        if (status & MCI_DATABLOCKEND)
                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

        if (status & MCI_DATAEND || data->error) {
                mmci_dma_finalize(host, data);

                mmci_stop_data(host);

                if (!data->error)
                        /* The error clause is handled above, success! */
                        data->bytes_xfered = data->blksz * data->blocks;

                if (!data->stop) {
                        if (host->variant->cmdreg_stop && data->error)
                                mmci_stop_command(host);
                        else
                                mmci_request_end(host, data->mrq);
                } else if (host->mrq->sbc && !data->error) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
        void __iomem *base = host->base;
        bool sbc, busy_resp;

        if (!cmd)
                return;

        sbc = (cmd == host->mrq->sbc);
        busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

        /*
         * The status must contain at least one of these interrupts for
         * the command to be considered worth handling. Note that we tag
         * on any latent IRQs postponed while waiting for busy status.
         */
        if (host->variant->busy_timeout && busy_resp)
                err_msk |= MCI_DATATIMEOUT;

        if (!((status | host->busy_status) &
              (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
                return;

        /* Handle busy detection on DAT0 if the variant supports it. */
        if (busy_resp && host->variant->busy_detect)
                if (!host->ops->busy_complete(host, cmd, status, err_msk))
                        return;

        host->cmd = NULL;

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        } else if (host->variant->busy_timeout && busy_resp &&
                   status & MCI_DATATIMEOUT) {
                cmd->error = -ETIMEDOUT;
                /*
                 * This will wake up mmci_irq_thread() which will issue
                 * a hardware reset of the MMCI block.
                 */
                host->irq_action = IRQ_WAKE_THREAD;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
                cmd->resp[2] = readl(base + MMCIRESPONSE2);
                cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }

        if ((!sbc && !cmd->data) || cmd->error) {
                if (host->data) {
                        /* Terminate the DMA transfer */
                        mmci_dma_error(host);

                        mmci_stop_data(host);
                        if (host->variant->cmdreg_stop && cmd->error) {
                                mmci_stop_command(host);
                                return;
                        }
                }

                if (host->irq_action != IRQ_WAKE_THREAD)
                        mmci_request_end(host, host->mrq);

        } else if (sbc) {
                mmci_start_command(host, host->mrq->cmd, 0);
        } else if (!host->variant->datactrl_first &&
                   !(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}

static char *ux500_state_str(struct mmci_host *host)
{
        switch (host->busy_state) {
        case MMCI_BUSY_WAITING_FOR_START_IRQ:
                return "waiting for start IRQ";
        case MMCI_BUSY_WAITING_FOR_END_IRQ:
                return "waiting for end IRQ";
        case MMCI_BUSY_DONE:
                return "not waiting for IRQs";
        default:
                return "unknown";
        }
}

/*
 * This busy timeout worker is used to "kick" the command IRQ if a
 * busy detect IRQ fails to appear within a reasonable time. Only used
 * on variants with busy-detection IRQ delivery.
 */
static void ux500_busy_timeout_work(struct work_struct *work)
{
        struct mmci_host *host = container_of(work, struct mmci_host,
                                        ux500_busy_timeout_work.work);
        unsigned long flags;
        u32 status;

        spin_lock_irqsave(&host->lock, flags);

        if (host->cmd) {
                /* If we are still busy let's tag on a cmd-timeout error. */
                status = readl(host->base + MMCISTATUS);
                if (status & host->variant->busy_detect_flag) {
                        status |= MCI_CMDTIMEOUT;
                        dev_err(mmc_dev(host->mmc),
                                "timeout in state %s still busy with CMD%02x\n",
                                ux500_state_str(host), host->cmd->opcode);
                } else {
                        dev_err(mmc_dev(host->mmc),
                                "timeout in state %s waiting for busy CMD%02x\n",
                                ux500_state_str(host), host->cmd->opcode);
                }

                mmci_cmd_irq(host, host->cmd, status);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
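        /*
         * MMCIFIFOCNT holds the number of words still to be
         * transferred; shift left by 2 to convert words to bytes.
         */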
        return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
        /*
         * On Qcom SDCC4, only 8 words are used in each burst, so only 8
         * addresses from the FIFO range should be used. The counts
         * returned here are in bytes.
         */
        if (status & MCI_RXFIFOHALFFULL)
                return host->variant->fifohalfsize;
        else if (status & MCI_RXDATAAVLBL)
                return 4;

        return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status = readl(host->base + MMCISTATUS);
        int host_remain = host->size;

        do {
                int count = host->get_rx_fifocnt(host, status, host_remain);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc). Therefore make sure to always read the last bytes
                 * while only doing full 32-bit reads towards the FIFO.
                 */
                if (unlikely(count & 0x3)) {
                        if (count < 4) {
                                unsigned char buf[4];
                                ioread32_rep(base + MMCIFIFO, buf, 1);
                                memcpy(ptr, buf, count);
                        } else {
                                ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
                                count &= ~0x3;
                        }
                } else {
                        ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
                }

                ptr += count;
                remain -= count;
                host_remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ?
                         variant->fifosize : variant->fifohalfsize;
                count = min(remain, maxcnt);

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc), and the FIFO only accepts full 32-bit writes.
                 * So compensate by adding +3 to the count: a single
                 * byte becomes one 32-bit write, 7 bytes become two
                 * 32-bit writes, etc.
                 */
                iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        u32 status;

        status = readl(base + MMCISTATUS);

        dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

        do {
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                if (!sg_miter_next(sg_miter))
                        break;

                buffer = sg_miter->addr;
                remain = sg_miter->length;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                sg_miter->consumed = len;

                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);

        sg_miter_stop(sg_miter);

        /*
         * If we have less than the fifo 'half-full' threshold to transfer,
         * trigger a PIO interrupt as soon as any data is available.
         */
        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                mmci_set_mask1(host, 0);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}

static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable)
{
        void __iomem *base = host->base;
        u32 mask = readl_relaxed(base + MMCIMASK0);

        if (enable)
                writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0);
        else
                writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0);
}

static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status)
{
        if (status & MCI_ST_SDIOIT) {
                mmci_write_sdio_irq_bit(host, 0);
                sdio_signal_irq(host->mmc);
        }
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;

        spin_lock(&host->lock);
        host->irq_action = IRQ_HANDLED;

        do {
                status = readl(host->base + MMCISTATUS);
                if (!status)
                        break;

                if (host->singleirq) {
                        if (status & host->mask1_reg)
                                mmci_pio_irq(irq, dev_id);

                        status &= ~host->variant->irq_pio_mask;
                }

                /*
                 * Busy detection is managed by mmci_cmd_irq(), which
                 * also clears the corresponding IRQ.
                 */
                status &= readl(host->base + MMCIMASK0);
                if (host->variant->busy_detect)
                        writel(status & ~host->variant->busy_detect_mask,
                               host->base + MMCICLEAR);
                else
                        writel(status, host->base + MMCICLEAR);

                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

                if (host->variant->reversed_irq_handling) {
                        mmci_data_irq(host, host->data, status);
                        mmci_cmd_irq(host, host->cmd, status);
                } else {
                        mmci_cmd_irq(host, host->cmd, status);
                        mmci_data_irq(host, host->data, status);
                }

                if (host->variant->supports_sdio_irq)
                        mmci_signal_sdio_irq(host, status);

                /*
                 * Busy detection has been handled by mmci_cmd_irq() above.
                 * Clear the status bit to prevent polling in IRQ context.
                 */
                if (host->variant->busy_detect_flag)
                        status &= ~host->variant->busy_detect_flag;

        } while (status);

        spin_unlock(&host->lock);

        return host->irq_action;
}

/*
 * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
 *
 * A reset is needed for some variants, where a datatimeout for a R1B request
 * causes the DPSM to stay busy (non-functional).
 */
static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        unsigned long flags;

        if (host->rst) {
                reset_control_assert(host->rst);
                udelay(2);
                reset_control_deassert(host->rst);
        }

        spin_lock_irqsave(&host->lock, flags);
        writel(host->clk_reg, host->base + MMCICLOCK);
        writel(host->pwr_reg, host->base + MMCIPOWER);
        writel(MCI_IRQENABLE | host->variant->start_err,
               host->base + MMCIMASK0);

        host->irq_action = IRQ_HANDLED;
        mmci_request_end(host, host->mrq);
        spin_unlock_irqrestore(&host->lock, flags);

        return host->irq_action;
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        WARN_ON(host->mrq != NULL);

        mrq->cmd->error = mmci_validate_data(host, mrq->data);
        if (mrq->cmd->error) {
                mmc_request_done(mmc, mrq);
                return;
        }

        spin_lock_irqsave(&host->lock, flags);

        host->mrq = mrq;

        if (mrq->data)
                mmci_get_next_data(host, mrq->data);

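        /*
         * For reads, arm the data path before sending the command so
         * the DPSM and FIFO are ready when the card starts returning
         * data; datactrl_first variants need this ordering for writes
         * too.
         */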
        if (mrq->data &&
            (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
                mmci_start_data(host, mrq->data);

        if (mrq->sbc)
                mmci_start_command(host, mrq->sbc, 0);
        else
                mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        u32 max_busy_timeout = 0;

        if (!host->variant->busy_detect)
                return;

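        /*
         * The busy timer counts card clock cycles in a 32-bit register,
         * so the longest expressible timeout in ms is U32_MAX divided
         * by the clock cycles per ms. Illustrative example: at 100 MHz
         * this is 4294967295 / 100000 ~= 42949 ms, i.e. about 43 s.
         */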
        if (host->variant->busy_timeout && mmc->actual_clock)
                max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
                                                          MSEC_PER_SEC);

        mmc->max_busy_timeout = max_busy_timeout;
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
        int ret;

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

                if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
                        regulator_disable(mmc->supply.vqmmc);
                        host->vqmmc_enabled = false;
                }

                break;
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

                /*
                 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
                 * and instead uses MCI_PWR_ON, so apply whatever value is
                 * configured in the variant data.
                 */
                pwr |= variant->pwrreg_powerup;

                break;
        case MMC_POWER_ON:
                if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
                        ret = regulator_enable(mmc->supply.vqmmc);
                        if (ret < 0)
                                dev_err(mmc_dev(mmc),
                                        "failed to enable vqmmc regulator\n");
                        else
                                host->vqmmc_enabled = true;
                }

                pwr |= MCI_PWR_ON;
                break;
        }

        if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
                /*
                 * The ST Micro variant has some additional bits
                 * indicating signal direction for the signals in
                 * the SD/MMC bus and feedback-clock usage.
                 */
                pwr |= host->pwr_reg_add;

                if (ios->bus_width == MMC_BUS_WIDTH_4)
                        pwr &= ~MCI_ST_DATA74DIREN;
                else if (ios->bus_width == MMC_BUS_WIDTH_1)
                        pwr &= (~MCI_ST_DATA74DIREN &
                                ~MCI_ST_DATA31DIREN &
                                ~MCI_ST_DATA2DIREN);
        }

        if (variant->opendrain) {
                if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                        pwr |= variant->opendrain;
        } else {
                /*
                 * If the variant cannot configure the pads on its own,
                 * we expect pinctrl to be able to do that for us.
                 */
                if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                        pinctrl_select_state(host->pinctrl, host->pins_opendrain);
                else
                        pinctrl_select_default_state(mmc_dev(mmc));
        }

        /*
         * If clock = 0 and the variant requires the MMCIPOWER to be used for
         * gating the clock, the MCI_PWR_ON bit is cleared.
         */
        if (!ios->clock && variant->pwrreg_clkgate)
                pwr &= ~MCI_PWR_ON;

        if (host->variant->explicit_mclk_control &&
            ios->clock != host->clock_cache) {
                ret = clk_set_rate(host->clk, ios->clock);
                if (ret < 0)
                        dev_err(mmc_dev(host->mmc),
                                "Error setting clock rate (%d)\n", ret);
                else
                        host->mclk = clk_get_rate(host->clk);
        }
        host->clock_cache = ios->clock;

        spin_lock_irqsave(&host->lock, flags);

        if (host->ops && host->ops->set_clkreg)
                host->ops->set_clkreg(host, ios->clock);
        else
                mmci_set_clkreg(host, ios->clock);

        mmci_set_max_busy_timeout(mmc);

        if (host->ops && host->ops->set_pwrreg)
                host->ops->set_pwrreg(host, pwr);
        else
                mmci_write_pwrreg(host, pwr);

        mmci_reg_delay(host);

        spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmci_platform_data *plat = host->plat;
        unsigned int status = mmc_gpio_get_cd(mmc);

        if (status == -ENOSYS) {
                if (!plat->status)
                        return 1; /* Assume always present */

                status = plat->status(mmc_dev(host->mmc));
        }
        return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        int ret;

        ret = mmc_regulator_set_vqmmc(mmc, ios);

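        /*
         * Note that an error from the regulator switch itself is
         * deliberately discarded below; only the variant's post-switch
         * hook can fail the overall operation.
         */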
        if (!ret && host->ops && host->ops->post_sig_volt_switch)
                ret = host->ops->post_sig_volt_switch(host, ios);
        else if (ret)
                ret = 0;

        if (ret < 0)
                dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

        return ret;
}

static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        if (enable)
                /* Keep the SDIO mode bit if SDIO irqs are enabled */
                pm_runtime_get_sync(mmc_dev(mmc));

        spin_lock_irqsave(&host->lock, flags);
        mmci_write_sdio_irq_bit(host, enable);
        spin_unlock_irqrestore(&host->lock, flags);

        if (!enable) {
                pm_runtime_mark_last_busy(mmc_dev(mmc));
                pm_runtime_put_autosuspend(mmc_dev(mmc));
        }
}

static void mmci_ack_sdio_irq(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        mmci_write_sdio_irq_bit(host, 1);
        spin_unlock_irqrestore(&host->lock, flags);
}

static struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
        .pre_req        = mmci_pre_request,
        .post_req       = mmci_post_request,
        .set_ios        = mmci_set_ios,
        .get_ro         = mmc_gpio_get_ro,
        .get_cd         = mmci_get_cd,
        .start_signal_voltage_switch = mmci_sig_volt_switch,
};

static void mmci_probe_level_translator(struct mmc_host *mmc)
{
        struct device *dev = mmc_dev(mmc);
        struct mmci_host *host = mmc_priv(mmc);
        struct gpio_desc *cmd_gpio;
        struct gpio_desc *ck_gpio;
        struct gpio_desc *ckin_gpio;
        int clk_hi, clk_lo;

        /*
         * Assume the level translator is present if st,use-ckin is set.
         * This is to cater for DTs which do not implement this test.
         */
        host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

        cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
        if (IS_ERR(cmd_gpio))
                goto exit_cmd;

        ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
        if (IS_ERR(ck_gpio))
                goto exit_ck;

        ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
        if (IS_ERR(ckin_gpio))
                goto exit_ckin;

        /* All GPIOs are valid, test whether level translator works */

        /* Sample CKIN */
        clk_hi = !!gpiod_get_value(ckin_gpio);

        /* Set CK low */
        gpiod_set_value(ck_gpio, 0);

        /* Sample CKIN */
        clk_lo = !!gpiod_get_value(ckin_gpio);

        /* Tristate all */
        gpiod_direction_input(cmd_gpio);
        gpiod_direction_input(ck_gpio);

        /* Level translator is present if CK signal is propagated to CKIN */
        if (!clk_hi || clk_lo) {
                host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
                dev_warn(dev,
                         "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
        }

        gpiod_put(ckin_gpio);

exit_ckin:
        gpiod_put(ck_gpio);
exit_ck:
        gpiod_put(cmd_gpio);
exit_cmd:
        pinctrl_select_default_state(dev);
}

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        int ret = mmc_of_parse(mmc);

        if (ret)
                return ret;

        if (of_property_read_bool(np, "st,sig-dir-dat0"))
                host->pwr_reg_add |= MCI_ST_DATA0DIREN;
        if (of_property_read_bool(np, "st,sig-dir-dat2"))
                host->pwr_reg_add |= MCI_ST_DATA2DIREN;
        if (of_property_read_bool(np, "st,sig-dir-dat31"))
                host->pwr_reg_add |= MCI_ST_DATA31DIREN;
        if (of_property_read_bool(np, "st,sig-dir-dat74"))
                host->pwr_reg_add |= MCI_ST_DATA74DIREN;
        if (of_property_read_bool(np, "st,sig-dir-cmd"))
                host->pwr_reg_add |= MCI_ST_CMDDIREN;
        if (of_property_read_bool(np, "st,sig-pin-fbclk"))
                host->pwr_reg_add |= MCI_ST_FBCLKEN;
        if (of_property_read_bool(np, "st,sig-dir"))
                host->pwr_reg_add |= MCI_STM32_DIRPOL;
        if (of_property_read_bool(np, "st,neg-edge"))
                host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
        if (of_property_read_bool(np, "st,use-ckin"))
                mmci_probe_level_translator(mmc);

        if (of_property_read_bool(np, "mmc-cap-mmc-highspeed"))
                mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
        if (of_property_read_bool(np, "mmc-cap-sd-highspeed"))
                mmc->caps |= MMC_CAP_SD_HIGHSPEED;

        return 0;
}

static int mmci_probe(struct amba_device *dev,
        const struct amba_id *id)
{
        struct mmci_platform_data *plat = dev->dev.platform_data;
        struct device_node *np = dev->dev.of_node;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* Must have platform data or Device Tree. */
        if (!plat && !np) {
                dev_err(&dev->dev, "No plat data or DT found\n");
                return -EINVAL;
        }

        if (!plat) {
                plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
                if (!plat)
                        return -ENOMEM;
        }

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc)
                return -ENOMEM;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->mmc_ops = &mmci_ops;
        mmc->ops = &mmci_ops;

        ret = mmci_of_parse(np, mmc);
        if (ret)
                goto host_free;

        /*
         * Some variants (STM32) don't have an open-drain bit; the pins
         * can nevertheless be configured accordingly using pinctrl.
         */
        if (!variant->opendrain) {
                host->pinctrl = devm_pinctrl_get(&dev->dev);
                if (IS_ERR(host->pinctrl)) {
                        dev_err(&dev->dev, "failed to get pinctrl");
                        ret = PTR_ERR(host->pinctrl);
                        goto host_free;
                }

                host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
                                                            MMCI_PINCTRL_STATE_OPENDRAIN);
                if (IS_ERR(host->pins_opendrain)) {
                        dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
                        ret = PTR_ERR(host->pins_opendrain);
                        goto host_free;
                }
        }

        host->hw_designer = amba_manf(dev);
        host->hw_revision = amba_rev(dev);
        dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
        dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

        host->clk = devm_clk_get(&dev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                goto host_free;
        }

        ret = clk_prepare_enable(host->clk);
        if (ret)
                goto host_free;

        if (variant->qcom_fifo)
                host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
        else
                host->get_rx_fifocnt = mmci_get_rx_fifocnt;

        host->plat = plat;
        host->variant = variant;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is at most 100 MHz, so we try to
         * adjust the clock down to this (if possible).
         */
        if (host->mclk > variant->f_max) {
                ret = clk_set_rate(host->clk, variant->f_max);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
                        host->mclk);
        }

        host->phybase = dev->res.start;
        host->base = devm_ioremap_resource(&dev->dev, &dev->res);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto clk_disable;
        }

        if (variant->init)
                variant->init(host);

        /*
         * The ARM and ST versions of the block have slightly different
         * clock divider equations, which means that the minimum divider
         * differs too. On Qualcomm-like controllers, pick the nearest
         * achievable clock rate down towards 100 kHz.
         */
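        /*
         * Worked example (purely illustrative, assuming mclk = 100 MHz):
         * the ST divider bottoms out at 100 MHz / 257 ~= 389 kHz, the
         * original ARM divider at 100 MHz / 512 ~= 195 kHz.
         */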
        if (variant->st_clkdiv)
                mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
        else if (variant->stm32_clkdiv)
                mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
        else if (variant->explicit_mclk_control)
                mmc->f_min = clk_round_rate(host->clk, 100000);
        else
                mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
        /*
         * If no maximum operating frequency is supplied, fall back to the
         * module parameter, which has a (low) default value in case it
         * is not specified. Neither value may exceed the clock rate into
         * the block, of course.
         */
        if (mmc->f_max)
                mmc->f_max = variant->explicit_mclk_control ?
                                min(variant->f_max, mmc->f_max) :
                                min(host->mclk, mmc->f_max);
        else
                mmc->f_max = variant->explicit_mclk_control ?
                                fmax : min(host->mclk, fmax);

        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

        host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
        if (IS_ERR(host->rst)) {
                ret = PTR_ERR(host->rst);
                goto clk_disable;
        }
        ret = reset_control_deassert(host->rst);
        if (ret)
                dev_err(mmc_dev(mmc), "failed to de-assert reset\n");

        /* Get regulators and the supported OCR mask */
        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                goto clk_disable;

        if (!mmc->ocr_avail)
                mmc->ocr_avail = plat->ocr_mask;
        else if (plat->ocr_mask)
                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

        /* We support these capabilities. */
        mmc->caps |= MMC_CAP_CMD23;

        /*
         * Enable busy detection.
         */
        if (variant->busy_detect) {
                mmci_ops.card_busy = mmci_card_busy;
                /*
                 * Not all variants have a flag to enable busy detection
                 * in the DPSM, but if they do, set it here.
                 */
                if (variant->busy_dpsm_flag)
                        mmci_write_datactrlreg(host,
                                               host->variant->busy_dpsm_flag);
                mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
        }

        if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) {
                mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

                mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq;
                mmci_ops.ack_sdio_irq   = mmci_ack_sdio_irq;

                mmci_write_datactrlreg(host,
                                       host->variant->datactrl_mask_sdio);
        }

        /* Variants with a mandatory busy timeout in HW need R1B responses. */
        if (variant->busy_timeout)
                mmc->caps |= MMC_CAP_NEED_RSP_BUSY;

        /* Prepare a CMD12 - needed to clear the DPSM on some variants. */
        host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
        host->stop_abort.arg = 0;
        host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;

        /* We support these PM capabilities. */
        mmc->pm_caps |= MMC_PM_KEEP_POWER;

        /*
         * We can do scatter-gather I/O.
         */
        mmc->max_segs = NR_SG;

        /*
         * Since only a certain number of bits are valid in the data length
         * register, we must ensure that we don't exceed 2^num-1 bytes in a
         * single request.
         */
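        /* For instance, 16 datalength bits allow at most 65535 bytes. */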
        mmc->max_req_size = (1 << variant->datalength_bits) - 1;

        /*
         * Set the maximum segment size.  Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 1 << variant->datactrl_blocksz;

        /*
         * Limit the number of blocks transferred so that we don't overflow
         * the maximum request size.
         */
        mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;

        spin_lock_init(&host->lock);

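        /* Mask and clear all interrupts before the IRQ handlers are installed. */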
        writel(0, host->base + MMCIMASK0);

        if (variant->mmcimask1)
                writel(0, host->base + MMCIMASK1);

        writel(0xfff, host->base + MMCICLEAR);

        /*
         * If we are not probing from DT, look up the GPIO descriptors
         * named "cd" and "wp" from a board descriptor table right here,
         * failing silently if they do not exist.
         */
        if (!np) {
                ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
                if (ret == -EPROBE_DEFER)
                        goto clk_disable;

                ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
                if (ret == -EPROBE_DEFER)
                        goto clk_disable;
        }

        ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
                                        mmci_irq_thread, IRQF_SHARED,
                                        DRIVER_NAME " (cmd)", host);
        if (ret)
                goto clk_disable;

        if (!dev->irq[1])
                host->singleirq = true;
        else {
                ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
                                IRQF_SHARED, DRIVER_NAME " (pio)", host);
                if (ret)
                        goto clk_disable;
        }

        if (host->variant->busy_detect)
                INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
                                  ux500_busy_timeout_work);

        writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
                 amba_rev(dev), (unsigned long long)dev->res.start,
                 dev->irq[0], dev->irq[1]);

        mmci_dma_setup(host);

        pm_runtime_set_autosuspend_delay(&dev->dev, 50);
        pm_runtime_use_autosuspend(&dev->dev);

        ret = mmc_add_host(mmc);
        if (ret)
                goto clk_disable;

        pm_runtime_put(&dev->dev);
        return 0;

 clk_disable:
        clk_disable_unprepare(host->clk);
 host_free:
        mmc_free_host(mmc);
        return ret;
}

static void mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                struct variant_data *variant = host->variant;

                /*
                 * Undo pm_runtime_put() in probe.  We use the _sync
                 * version here so that we can access the primecell.
                 */
                pm_runtime_get_sync(&dev->dev);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);

                if (variant->mmcimask1)
                        writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                mmci_dma_release(host);
                clk_disable_unprepare(host->clk);
                mmc_free_host(mmc);
        }
}

#ifdef CONFIG_PM
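/*
 * Park the interface for runtime suspend: mask all interrupts and, on
 * variants flagged pwrreg_nopower, zero the data path, power and clock
 * registers so that mmci_restore() can re-apply them on resume.
 */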
static void mmci_save(struct mmci_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        writel(0, host->base + MMCIMASK0);
        if (host->variant->pwrreg_nopower) {
                writel(0, host->base + MMCIDATACTRL);
                writel(0, host->base + MMCIPOWER);
                writel(0, host->base + MMCICLOCK);
        }
        mmci_reg_delay(host);

        spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->variant->pwrreg_nopower) {
                writel(host->clk_reg, host->base + MMCICLOCK);
                writel(host->datactrl_reg, host->base + MMCIDATACTRL);
                writel(host->pwr_reg, host->base + MMCIPOWER);
        }
        writel(MCI_IRQENABLE | host->variant->start_err,
               host->base + MMCIMASK0);
        mmci_reg_delay(host);

        spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                pinctrl_pm_select_sleep_state(dev);
                mmci_save(host);
                clk_disable_unprepare(host->clk);
        }

        return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                clk_prepare_enable(host->clk);
                mmci_restore(host);
                pinctrl_select_default_state(dev);
        }

        return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

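/*
 * AMBA peripheral ID matching: a device matches an entry when
 * (periphid & mask) == id, so a mask of 0xff0fffff, for example,
 * ignores the revision field in bits [23:20].
 */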
static const struct amba_id mmci_ids[] = {
        {
                .id     = 0x00041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm,
        },
        {
                .id     = 0x01041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm_extended_fifo,
        },
        {
                .id     = 0x02041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm_extended_fifo_hwfc,
        },
        {
                .id     = 0x00041181,
                .mask   = 0x000fffff,
                .data   = &variant_arm,
        },
        /* ST Micro variants */
        {
                .id     = 0x00180180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x10180180,
                .mask   = 0xf0ffffff,
                .data   = &variant_nomadik,
        },
        {
                .id     = 0x00280180,
                .mask   = 0x00ffffff,
                .data   = &variant_nomadik,
        },
        {
                .id     = 0x00480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500,
        },
        {
                .id     = 0x10480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500v2,
        },
        {
                .id     = 0x00880180,
                .mask   = 0x00ffffff,
                .data   = &variant_stm32,
        },
        {
                .id     = 0x10153180,
                .mask   = 0xf0ffffff,
                .data   = &variant_stm32_sdmmc,
        },
        {
                .id     = 0x00253180,
                .mask   = 0xf0ffffff,
                .data   = &variant_stm32_sdmmcv2,
        },
        {
                .id     = 0x20253180,
                .mask   = 0xf0ffffff,
                .data   = &variant_stm32_sdmmcv2,
        },
        {
                .id     = 0x00353180,
                .mask   = 0xf0ffffff,
                .data   = &variant_stm32_sdmmcv3,
        },
        /* Qualcomm variants */
        {
                .id     = 0x00051180,
                .mask   = 0x000fffff,
                .data   = &variant_qcom,
        },
        { 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
        .drv            = {
                .name   = DRIVER_NAME,
                .pm     = &mmci_dev_pm_ops,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
        .probe          = mmci_probe,
        .remove         = mmci_remove,
        .id_table       = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");