// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/slot-gpio.h>
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
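
/*
 * Illustrative example (not from the databook): with the descriptor ring
 * allocated from a single PAGE_SIZE buffer and each chained descriptor
 * limited to DW_MCI_DESC_DATA_LENGTH (4KB), a 9KB scatterlist entry is
 * split into three descriptors of 4KB + 4KB + 1KB by the
 * dw_mci_prepare_desc*() helpers below.
 */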
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
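
	/*
	 * For reference (SDIO spec, not driver-specific): the CMD52
	 * (SD_IO_RW_DIRECT) argument packs the R/W flag in bit 31, the
	 * function number in bits 30:28 and the register address in bits
	 * 25:9, so ((arg >> 9) & 0x1FFFF) above extracts the register
	 * address and compares it against SDIO_CCCR_ABORT (0x06).
	 */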
	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    mmc_op_tuning(cmdr) ||
	    cmdr == MMC_GEN_CMD) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit of spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
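
/*
 * Worked example (illustrative numbers, not from the databook): with
 * host->bus_hz = 100 MHz, CLKDIV = 1 (so cto_div = 2) and TMOUT[7:0] = 0xff,
 * cto_ms = DIV_ROUND_UP(1000 * 255 * 2, 100000000) = 1 ms; after the 10 ms
 * margin the software timer fires roughly 11 ms (plus one jiffy) after the
 * command is started if no interrupt has arrived.
 */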
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;

			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;
	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					      (sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
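
/*
 * Sizing note (illustrative, assuming PAGE_SIZE = 4096 and no struct
 * padding): a 32-byte idmac_desc_64addr gives a ring of 128 descriptors,
 * while a 16-byte idmac_desc gives 256. With up to 4KB per chained
 * descriptor, one ring can describe at most about 512KB (64-bit) or 1MB
 * (32-bit) per transfer before dw_mci_prepare_desc*() runs out of
 * descriptors.
 */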
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      !(val & IDMAC_DES0_OWN),
						      10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}
static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;
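
	/*
	 * Illustrative decode (values depend on how FIFOTH was programmed):
	 * the MSIZE field in FIFOTH[30:28] indexes the mszs[] table above,
	 * so e.g. an MSIZE field of 2 selects a burst of 8 transfers for
	 * both directions of the external DMA channel.
	 */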
	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return ret;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}

		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
	     || !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
		 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
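
/*
 * Worked example (illustrative): with fifo_depth = 128, a 32-bit FIFO
 * (fifo_width = 4) and blksz = 512, blksz_depth = 128, tx_wmark = 64 and
 * tx_wmark_invers = 64. The loop above picks the largest mszs[] entry
 * dividing both 128 and 64, i.e. mszs[5] = 64, giving msize = 5 and
 * rx_wmark = 63.
 */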
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a.
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to the data size instead.
		 * If the next transfer may be done in DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
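
		/*
		 * Worked example (illustrative): with bus_hz = 100 MHz and a
		 * requested clock of 400 kHz, div = DIV_ROUND_UP(250, 2) =
		 * 125, and the card clock becomes bus_hz / (2 * div) =
		 * 400 kHz, which is what the actual_clock computation below
		 * reports.
		 */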
		if ((clock != slot->__clk_old &&
		     !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
		    force_clkinit) {
			/* Silence the verbose log when called from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
			    slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void dw_mci_set_data_timeout(struct dw_mci *host,
				    unsigned int timeout_ns)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 clk_div, tmout;
	u64 tmp;

	if (drv_data && drv_data->set_data_timeout)
		return drv_data->set_data_timeout(host, timeout_ns);

	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
	if (clk_div == 0)
		clk_div = 1;

	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);

	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
	tmout = 0xFF; /* Set maximum */

	/* TMOUT[31:8] (DATA_TIMEOUT) */
	if (!tmp || tmp > 0xFFFFFF)
		tmout |= (0xFFFFFF << 8);
	else
		tmout |= (tmp & 0xFFFFFF) << 8;

	mci_writel(host, TMOUT, tmout);
	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
		timeout_ns, tmout >> 8);
}
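
/*
 * Worked example (illustrative): for timeout_ns = 10 ms, bus_hz = 50 MHz
 * and CLKDIV = 1 (clk_div = 2), tmp = DIV_ROUND_UP(10e6 ns * 50 MHz, 1e9) =
 * 500000 controller clocks, then 500000 / 2 = 250000 card clocks, so
 * TMOUT[31:8] is programmed with 250000 (0x03D090), which fits the 24-bit
 * field without clamping.
 */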
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_data_timeout(host, data->timeout_ns);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * This case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state.
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;
			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);
		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}
static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
			       SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
	u32 clk_en_a_old;
	u32 clk_en_a;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	clk_en_a_old = mci_readl(host, CLKENA);
	if (prepare) {
		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old & ~clken_low_pwr;
	} else {
		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old | clken_low_pwr;
	}

	if (clk_en_a != clk_en_a_old) {
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
			     0);
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	dw_mci_prepare_sdio_irq(slot, enb);
	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.card_hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};
#ifdef CONFIG_FAULT_INJECTION
static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
{
	struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	/*
	 * Only inject an error if we haven't already got an error or data over
	 * interrupt.
	 */
	if (!host->data_status) {
		host->data_status = SDMMC_INT_DCRC;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	spin_unlock_irqrestore(&host->irq_lock, flags);

	return HRTIMER_NORESTART;
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (!data || data->blocks <= 1)
		return;

	if (!should_fail(&host->fail_data_crc, 1))
		return;

	/*
	 * Try to inject the error at random points during the data transfer.
	 */
	hrtimer_start(&host->fault_timer,
		      ms_to_ktime(get_random_u32_below(25)),
		      HRTIMER_MODE_REL);
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
	hrtimer_cancel(&host->fault_timer);
}

static void dw_mci_init_fault(struct dw_mci *host)
{
	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;

	hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	host->fault_timer.function = dw_mci_fault_timer;
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
{
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
}
#endif
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
static void dw_mci_set_drto(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	if (drv_data && drv_data->get_drto_clks)
		drto_clks = drv_data->get_drto_clks(host);
	else
		drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->bus_hz);

	dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);

	/* add a bit of spare time */
	drto_ms += 10;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
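
/*
 * Worked example (illustrative): with the maximum 24-bit data timeout of
 * drto_clks = 0xFFFFFF, bus_hz = 100 MHz and drto_div = 2, drto_ms =
 * DIV_ROUND_UP(1000 * 16777215 * 2, 100000000) = 336 ms, so the software
 * dto_timer fires roughly 346 ms after the transfer starts if the hardware
 * never signals data over or a data error.
 */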
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped. This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}

static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}
static void dw_mci_tasklet_func(struct tasklet_struct *t)
{
	struct dw_mci *host = from_tasklet(host, t, tasklet);
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				__dw_mci_start_request(host, host->slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if (err != -ETIMEDOUT &&
				    host->dir_status == DW_MCI_RECV_STATUS) {
					state = STATE_SENDING_DATA;
					continue;
				}

				send_stop_abort(host, data);
				dw_mci_stop_dma(host);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			fallthrough;

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				dw_mci_stop_dma(host);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time while reading data,
				 * arm the data read timeout timer.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				dw_mci_stop_dma(host);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			fallthrough;

		case STATE_DATA_BUSY:
			if (!dw_mci_clear_pending_data_complete(host)) {
				/*
				 * If the data error interrupt comes but the
				 * data over interrupt doesn't come within the
				 * given time while reading data, arm the data
				 * read timeout timer.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			dw_mci_stop_fault_timer(host);
			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, a stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			fallthrough;

		case STATE_SENDING_STOP:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			dw_mci_stop_fault_timer(host);
			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
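
/*
 * Illustrative example: pushing 5 bytes through a 16-bit FIFO
 * (data_shift = 1) writes two full 16-bit words and parks the odd trailing
 * byte in part_buf; a later push (or the end-of-transfer check in
 * dw_mci_push_data16() below) flushes that remainder, so no byte is ever
 * written to the FIFO half-formed.
 */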
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
2408 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2410 struct mmc_data *data = host->data;
2413 /* try and push anything in the part_buf */
2414 if (unlikely(host->part_buf_count)) {
2415 int len = dw_mci_push_part_bytes(host, buf, cnt);
2419 if (host->part_buf_count == 4) {
2420 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2421 host->part_buf_count = 0;
2424 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2425 if (unlikely((unsigned long)buf & 0x3)) {
2427 u32 aligned_buf[32];
2428 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2429 int items = len >> 2;
2431 /* memcpy from input buffer into aligned buffer */
2432 memcpy(aligned_buf, buf, len);
2435 /* push data from aligned buffer into fifo */
2436 for (i = 0; i < items; ++i)
2437 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2444 for (; cnt >= 4; cnt -= 4)
2445 mci_fifo_writel(host->fifo_reg, *pdata++);
2448 /* put anything remaining in the part_buf */
2450 dw_mci_set_part_bytes(host, buf, cnt);
2451 /* Push data if we have reached the expected data length */
2452 if ((data->bytes_xfered + init_cnt) ==
2453 (data->blksz * data->blocks))
2454 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2458 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2460 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2461 if (unlikely((unsigned long)buf & 0x3)) {
2463 /* pull data from fifo into aligned buffer */
2464 u32 aligned_buf[32];
2465 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2466 int items = len >> 2;
2469 for (i = 0; i < items; ++i)
2470 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2471 /* memcpy from aligned buffer into output buffer */
2472 memcpy(buf, aligned_buf, len);
2481 for (; cnt >= 4; cnt -= 4)
2482 *pdata++ = mci_fifo_readl(host->fifo_reg);
2486 host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2487 dw_mci_pull_final_bytes(host, buf, cnt);
2491 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2493 struct mmc_data *data = host->data;
2496 /* try and push anything in the part_buf */
2497 if (unlikely(host->part_buf_count)) {
2498 int len = dw_mci_push_part_bytes(host, buf, cnt);
2503 if (host->part_buf_count == 8) {
2504 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2505 host->part_buf_count = 0;
2508 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2509 if (unlikely((unsigned long)buf & 0x7)) {
2511 u64 aligned_buf[16];
2512 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2513 int items = len >> 3;
2515 /* memcpy from input buffer into aligned buffer */
2516 memcpy(aligned_buf, buf, len);
2519 /* push data from aligned buffer into fifo */
2520 for (i = 0; i < items; ++i)
2521 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2528 for (; cnt >= 8; cnt -= 8)
2529 mci_fifo_writeq(host->fifo_reg, *pdata++);
2532 /* put anything remaining in the part_buf */
2534 dw_mci_set_part_bytes(host, buf, cnt);
2535 /* Push data if we have reached the expected data length */
2536 if ((data->bytes_xfered + init_cnt) ==
2537 (data->blksz * data->blocks))
2538 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2542 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2544 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2545 if (unlikely((unsigned long)buf & 0x7)) {
2547 /* pull data from fifo into aligned buffer */
2548 u64 aligned_buf[16];
2549 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2550 int items = len >> 3;
2553 for (i = 0; i < items; ++i)
2554 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2556 /* memcpy from aligned buffer into output buffer */
2557 memcpy(buf, aligned_buf, len);
2566 for (; cnt >= 8; cnt -= 8)
2567 *pdata++ = mci_fifo_readq(host->fifo_reg);
2571 host->part_buf = mci_fifo_readq(host->fifo_reg);
2572 dw_mci_pull_final_bytes(host, buf, cnt);
2576 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2580 /* get remaining partial bytes */
2581 len = dw_mci_pull_part_bytes(host, buf, cnt);
2582 if (unlikely(len == cnt))
2587 /* get the rest of the data */
2588 host->pull_data(host, buf, cnt);
2591 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2593 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2595 unsigned int offset;
2596 struct mmc_data *data = host->data;
2597 int shift = host->data_shift;
2600 unsigned int remain, fcnt;
2603 if (!sg_miter_next(sg_miter))
2606 host->sg = sg_miter->piter.sg;
2607 buf = sg_miter->addr;
2608 remain = sg_miter->length;
2612 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2613 << shift) + host->part_buf_count;
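/*
 * fcnt is the number of bytes ready to pull: the FIFO word
 * count scaled to bytes plus any leftover partial bytes. For
 * example, with a 32-bit FIFO (shift == 2), 8 words pending
 * and 2 bytes in part_buf, fcnt = (8 << 2) + 2 = 34 bytes.
 */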
2614 len = min(remain, fcnt);
2617 dw_mci_pull_data(host, (void *)(buf + offset), len);
2618 data->bytes_xfered += len;
2623 sg_miter->consumed = offset;
2624 status = mci_readl(host, MINTSTS);
2625 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
/* if the RXDR is ready, read again */
2627 } while ((status & SDMMC_INT_RXDR) ||
2628 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2631 if (!sg_miter_next(sg_miter))
2633 sg_miter->consumed = 0;
2635 sg_miter_stop(sg_miter);
2639 sg_miter_stop(sg_miter);
2641 smp_wmb(); /* drain writebuffer */
2642 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2645 static void dw_mci_write_data_pio(struct dw_mci *host)
2647 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2649 unsigned int offset;
2650 struct mmc_data *data = host->data;
2651 int shift = host->data_shift;
2654 unsigned int fifo_depth = host->fifo_depth;
2655 unsigned int remain, fcnt;
2658 if (!sg_miter_next(sg_miter))
2661 host->sg = sg_miter->piter.sg;
2662 buf = sg_miter->addr;
2663 remain = sg_miter->length;
2667 fcnt = ((fifo_depth -
2668 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2669 << shift) - host->part_buf_count;
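/*
 * Here fcnt is the free space in bytes: the unused FIFO slots
 * scaled to bytes, minus the bytes already staged in part_buf.
 * For example, with a 32-word FIFO of 32-bit words (shift == 2),
 * 10 words occupied and 3 bytes staged, fcnt =
 * ((32 - 10) << 2) - 3 = 85 bytes.
 */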
2670 len = min(remain, fcnt);
2673 host->push_data(host, (void *)(buf + offset), len);
2674 data->bytes_xfered += len;
2679 sg_miter->consumed = offset;
2680 status = mci_readl(host, MINTSTS);
2681 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
2685 if (!sg_miter_next(sg_miter))
2687 sg_miter->consumed = 0;
2689 sg_miter_stop(sg_miter);
2693 sg_miter_stop(sg_miter);
2695 smp_wmb(); /* drain writebuffer */
2696 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2699 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2701 del_timer(&host->cto_timer);
2703 if (!host->cmd_status)
2704 host->cmd_status = status;
2706 smp_wmb(); /* drain writebuffer */
2708 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2709 tasklet_schedule(&host->tasklet);
2711 dw_mci_start_fault_timer(host);
2714 static void dw_mci_handle_cd(struct dw_mci *host)
2716 struct dw_mci_slot *slot = host->slot;
2718 mmc_detect_change(slot->mmc,
2719 msecs_to_jiffies(host->pdata->detect_delay_ms));
2722 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2724 struct dw_mci *host = dev_id;
2726 struct dw_mci_slot *slot = host->slot;
2728 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2731 /* Check volt switch first, since it can look like an error */
2732 if ((host->state == STATE_SENDING_CMD11) &&
2733 (pending & SDMMC_INT_VOLT_SWITCH)) {
2734 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2735 pending &= ~SDMMC_INT_VOLT_SWITCH;
2738 * Hold the lock; we know cmd11_timer can't be kicked
2739 * off after the lock is released, so safe to delete.
2741 spin_lock(&host->irq_lock);
2742 dw_mci_cmd_interrupt(host, pending);
2743 spin_unlock(&host->irq_lock);
2745 del_timer(&host->cmd11_timer);
2748 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2749 spin_lock(&host->irq_lock);
2751 del_timer(&host->cto_timer);
2752 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2753 host->cmd_status = pending;
2754 smp_wmb(); /* drain writebuffer */
2755 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2757 spin_unlock(&host->irq_lock);
2760 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2761 spin_lock(&host->irq_lock);
2763 if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2764 del_timer(&host->dto_timer);
/* if there is an error, report DATA_ERROR */
2767 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2768 host->data_status = pending;
2769 smp_wmb(); /* drain writebuffer */
2770 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2772 if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2773 /* In case of error, we cannot expect a DTO */
2774 set_bit(EVENT_DATA_COMPLETE,
2775 &host->pending_events);
2777 tasklet_schedule(&host->tasklet);
2779 spin_unlock(&host->irq_lock);
2782 if (pending & SDMMC_INT_DATA_OVER) {
2783 spin_lock(&host->irq_lock);
2785 del_timer(&host->dto_timer);
2787 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2788 if (!host->data_status)
2789 host->data_status = pending;
2790 smp_wmb(); /* drain writebuffer */
2791 if (host->dir_status == DW_MCI_RECV_STATUS) {
2792 if (host->sg != NULL)
2793 dw_mci_read_data_pio(host, true);
2795 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2796 tasklet_schedule(&host->tasklet);
2798 spin_unlock(&host->irq_lock);
2801 if (pending & SDMMC_INT_RXDR) {
2802 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2803 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2804 dw_mci_read_data_pio(host, false);
2807 if (pending & SDMMC_INT_TXDR) {
2808 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2809 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2810 dw_mci_write_data_pio(host);
2813 if (pending & SDMMC_INT_CMD_DONE) {
2814 spin_lock(&host->irq_lock);
2816 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2817 dw_mci_cmd_interrupt(host, pending);
2819 spin_unlock(&host->irq_lock);
2822 if (pending & SDMMC_INT_CD) {
2823 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2824 dw_mci_handle_cd(host);
2827 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2828 mci_writel(host, RINTSTS,
2829 SDMMC_INT_SDIO(slot->sdio_id));
2830 __dw_mci_enable_sdio_irq(slot, 0);
2831 sdio_signal_irq(slot->mmc);
2836 if (host->use_dma != TRANS_MODE_IDMAC)
2839 /* Handle IDMA interrupts */
2840 if (host->dma_64bit_address == 1) {
2841 pending = mci_readl(host, IDSTS64);
2842 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2843 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2844 SDMMC_IDMAC_INT_RI);
2845 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2846 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2847 host->dma_ops->complete((void *)host);
2850 pending = mci_readl(host, IDSTS);
2851 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2852 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2853 SDMMC_IDMAC_INT_RI);
2854 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2855 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2856 host->dma_ops->complete((void *)host);
2863 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2865 struct dw_mci *host = slot->host;
2866 const struct dw_mci_drv_data *drv_data = host->drv_data;
2867 struct mmc_host *mmc = slot->mmc;
2870 if (host->pdata->caps)
2871 mmc->caps = host->pdata->caps;
2873 if (host->pdata->pm_caps)
2874 mmc->pm_caps = host->pdata->pm_caps;
2877 mmc->caps |= drv_data->common_caps;
2879 if (host->dev->of_node) {
2880 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2884 ctrl_id = to_platform_device(host->dev)->id;
2887 if (drv_data && drv_data->caps) {
2888 if (ctrl_id >= drv_data->num_caps) {
2889 dev_err(host->dev, "invalid controller id %d\n",
2893 mmc->caps |= drv_data->caps[ctrl_id];
2896 if (host->pdata->caps2)
2897 mmc->caps2 = host->pdata->caps2;
2899 /* if host has set a minimum_freq, we should respect it */
2900 if (host->minimum_speed)
2901 mmc->f_min = host->minimum_speed;
2903 mmc->f_min = DW_MCI_FREQ_MIN;
2906 mmc->f_max = DW_MCI_FREQ_MAX;
2908 /* Process SDIO IRQs through the sdio_irq_work. */
2909 if (mmc->caps & MMC_CAP_SDIO_IRQ)
2910 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2915 static int dw_mci_init_slot(struct dw_mci *host)
2917 struct mmc_host *mmc;
2918 struct dw_mci_slot *slot;
2921 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2925 slot = mmc_priv(mmc);
2927 slot->sdio_id = host->sdio_id0 + slot->id;
2932 mmc->ops = &dw_mci_ops;
/* if there are external regulators, get them */
2935 ret = mmc_regulator_get_supply(mmc);
2937 goto err_host_allocated;
2939 if (!mmc->ocr_avail)
2940 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2942 ret = mmc_of_parse(mmc);
2944 goto err_host_allocated;
2946 ret = dw_mci_init_slot_caps(slot);
2948 goto err_host_allocated;
2950 /* Useful defaults if platform data is unset. */
2951 if (host->use_dma == TRANS_MODE_IDMAC) {
2952 mmc->max_segs = host->ring_size;
2953 mmc->max_blk_size = 65535;
2954 mmc->max_seg_size = 0x1000;
2955 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2956 mmc->max_blk_count = mmc->max_req_size / 512;
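/*
 * Illustrative sizing with 4 KiB segments (DW_MCI_DESC_DATA_LENGTH):
 * assuming ring_size == 128, max_req_size = 0x1000 * 128 = 512 KiB
 * and max_blk_count = 1024. The actual ring_size follows from
 * DESC_RING_BUF_SZ and the descriptor width in use.
 */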
2957 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2959 mmc->max_blk_size = 65535;
2960 mmc->max_blk_count = 65535;
2962 mmc->max_blk_size * mmc->max_blk_count;
2963 mmc->max_seg_size = mmc->max_req_size;
2965 /* TRANS_MODE_PIO */
2967 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2968 mmc->max_blk_count = 512;
2969 mmc->max_req_size = mmc->max_blk_size *
2971 mmc->max_seg_size = mmc->max_req_size;
2976 ret = mmc_add_host(mmc);
2978 goto err_host_allocated;
2980 #if defined(CONFIG_DEBUG_FS)
2981 dw_mci_init_debugfs(slot);
2991 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2993 /* Debugfs stuff is cleaned up by mmc core */
2994 mmc_remove_host(slot->mmc);
2995 slot->host->slot = NULL;
2996 mmc_free_host(slot->mmc);
2999 static void dw_mci_init_dma(struct dw_mci *host)
3002 struct device *dev = host->dev;
* Check transfer mode from HCON[17:16].
* The dw_mmc databook's description is ambiguous; in practice:
* 2b'00: No DMA Interface -> actually means using the internal DMA block
* 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
* 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
* 2b'11: Non DW DMA Interface -> PIO only
* Compared to the DesignWare DMA Interface, the Generic DMA Interface
* has a simpler request/acknowledge handshake mechanism, and both are
* regarded as external DMA masters by dw_mmc.
3015 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
3016 if (host->use_dma == DMA_INTERFACE_IDMA) {
3017 host->use_dma = TRANS_MODE_IDMAC;
3018 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
3019 host->use_dma == DMA_INTERFACE_GDMA) {
3020 host->use_dma = TRANS_MODE_EDMAC;
3025 /* Determine which DMA interface to use */
3026 if (host->use_dma == TRANS_MODE_IDMAC) {
3028 * Check ADDR_CONFIG bit in HCON to find
3029 * IDMAC address bus width
3031 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
3033 if (addr_config == 1) {
3034 /* host supports IDMAC in 64-bit address mode */
3035 host->dma_64bit_address = 1;
3037 "IDMAC supports 64-bit address mode.\n");
3038 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
3039 dma_set_coherent_mask(host->dev,
3042 /* host supports IDMAC in 32-bit address mode */
3043 host->dma_64bit_address = 0;
3045 "IDMAC supports 32-bit address mode.\n");
3048 /* Alloc memory for sg translation */
3049 host->sg_cpu = dmam_alloc_coherent(host->dev,
3051 &host->sg_dma, GFP_KERNEL);
3052 if (!host->sg_cpu) {
3054 "%s: could not alloc DMA memory\n",
3059 host->dma_ops = &dw_mci_idmac_ops;
3060 dev_info(host->dev, "Using internal DMA controller.\n");
3062 /* TRANS_MODE_EDMAC: check dma bindings again */
3063 if ((device_property_string_array_count(dev, "dma-names") < 0) ||
3064 !device_property_present(dev, "dmas")) {
3067 host->dma_ops = &dw_mci_edmac_ops;
3068 dev_info(host->dev, "Using external DMA controller.\n");
3071 if (host->dma_ops->init && host->dma_ops->start &&
3072 host->dma_ops->stop && host->dma_ops->cleanup) {
3073 if (host->dma_ops->init(host)) {
3074 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3079 dev_err(host->dev, "DMA initialization not found.\n");
3086 dev_info(host->dev, "Using PIO mode.\n");
3087 host->use_dma = TRANS_MODE_PIO;
3090 static void dw_mci_cmd11_timer(struct timer_list *t)
3092 struct dw_mci *host = from_timer(host, t, cmd11_timer);
3094 if (host->state != STATE_SENDING_CMD11) {
3095 dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3099 host->cmd_status = SDMMC_INT_RTO;
3100 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3101 tasklet_schedule(&host->tasklet);
3104 static void dw_mci_cto_timer(struct timer_list *t)
3106 struct dw_mci *host = from_timer(host, t, cto_timer);
3107 unsigned long irqflags;
3110 spin_lock_irqsave(&host->irq_lock, irqflags);
3113 * If somehow we have very bad interrupt latency it's remotely possible
3114 * that the timer could fire while the interrupt is still pending or
3115 * while the interrupt is midway through running. Let's be paranoid
* and detect those two cases. Note that this paranoia is somewhat
3117 * justified because in this function we don't actually cancel the
3118 * pending command in the controller--we just assume it will never come.
3120 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3121 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3122 /* The interrupt should fire; no need to act but we can warn */
3123 dev_warn(host->dev, "Unexpected interrupt latency\n");
3126 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3127 /* Presumably interrupt handler couldn't delete the timer */
3128 dev_warn(host->dev, "CTO timeout when already completed\n");
3133 * Continued paranoia to make sure we're in the state we expect.
3134 * This paranoia isn't really justified but it seems good to be safe.
3136 switch (host->state) {
3137 case STATE_SENDING_CMD11:
3138 case STATE_SENDING_CMD:
3139 case STATE_SENDING_STOP:
3141 * If CMD_DONE interrupt does NOT come in sending command
3142 * state, we should notify the driver to terminate current
3143 * transfer and report a command timeout to the core.
3145 host->cmd_status = SDMMC_INT_RTO;
3146 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3147 tasklet_schedule(&host->tasklet);
3150 dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3156 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3159 static void dw_mci_dto_timer(struct timer_list *t)
3161 struct dw_mci *host = from_timer(host, t, dto_timer);
3162 unsigned long irqflags;
3165 spin_lock_irqsave(&host->irq_lock, irqflags);
3168 * The DTO timer is much longer than the CTO timer, so it's even less
* likely that we'll hit these cases, but it pays to be paranoid.
3171 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3172 if (pending & SDMMC_INT_DATA_OVER) {
3173 /* The interrupt should fire; no need to act but we can warn */
3174 dev_warn(host->dev, "Unexpected data interrupt latency\n");
3177 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3178 /* Presumably interrupt handler couldn't delete the timer */
3179 dev_warn(host->dev, "DTO timeout when already completed\n");
3184 * Continued paranoia to make sure we're in the state we expect.
3185 * This paranoia isn't really justified but it seems good to be safe.
3187 switch (host->state) {
3188 case STATE_SENDING_DATA:
3189 case STATE_DATA_BUSY:
3191 * If DTO interrupt does NOT come in sending data state,
3192 * we should notify the driver to terminate current transfer
3193 * and report a data timeout to the core.
3195 host->data_status = SDMMC_INT_DRTO;
3196 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3197 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3198 tasklet_schedule(&host->tasklet);
3201 dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3207 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3211 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3213 struct dw_mci_board *pdata;
3214 struct device *dev = host->dev;
3215 const struct dw_mci_drv_data *drv_data = host->drv_data;
3217 u32 clock_frequency;
3219 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3221 return ERR_PTR(-ENOMEM);
/* find the reset controller if one exists */
3224 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3225 if (IS_ERR(pdata->rstc))
3226 return ERR_CAST(pdata->rstc);
3228 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3230 "fifo-depth property not found, using value of FIFOTH register as default\n");
3232 device_property_read_u32(dev, "card-detect-delay",
3233 &pdata->detect_delay_ms);
3235 device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3237 if (device_property_present(dev, "fifo-watermark-aligned"))
3238 host->wm_aligned = true;
3240 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3241 pdata->bus_hz = clock_frequency;
3243 if (drv_data && drv_data->parse_dt) {
3244 ret = drv_data->parse_dt(host);
3246 return ERR_PTR(ret);
3252 #else /* CONFIG_OF */
3253 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3255 return ERR_PTR(-EINVAL);
3257 #endif /* CONFIG_OF */
3259 static void dw_mci_enable_cd(struct dw_mci *host)
3261 unsigned long irqflags;
* No need for the CD interrupt if the slot polls for card
* detect or has a working (non-error) CD GPIO.
3268 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3271 if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3272 spin_lock_irqsave(&host->irq_lock, irqflags);
3273 temp = mci_readl(host, INTMASK);
3274 temp |= SDMMC_INT_CD;
3275 mci_writel(host, INTMASK, temp);
3276 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3280 int dw_mci_probe(struct dw_mci *host)
3282 const struct dw_mci_drv_data *drv_data = host->drv_data;
3283 int width, i, ret = 0;
3287 host->pdata = dw_mci_parse_dt(host);
3288 if (IS_ERR(host->pdata))
3289 return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3290 "platform data not available\n");
3293 host->biu_clk = devm_clk_get(host->dev, "biu");
3294 if (IS_ERR(host->biu_clk)) {
3295 dev_dbg(host->dev, "biu clock not available\n");
3297 ret = clk_prepare_enable(host->biu_clk);
3299 dev_err(host->dev, "failed to enable biu clock\n");
3304 host->ciu_clk = devm_clk_get(host->dev, "ciu");
3305 if (IS_ERR(host->ciu_clk)) {
3306 dev_dbg(host->dev, "ciu clock not available\n");
3307 host->bus_hz = host->pdata->bus_hz;
3309 ret = clk_prepare_enable(host->ciu_clk);
3311 dev_err(host->dev, "failed to enable ciu clock\n");
3315 if (host->pdata->bus_hz) {
3316 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3319 "Unable to set bus rate to %uHz\n",
3320 host->pdata->bus_hz);
3322 host->bus_hz = clk_get_rate(host->ciu_clk);
3325 if (!host->bus_hz) {
3327 "Platform data must supply bus speed\n");
3332 if (host->pdata->rstc) {
3333 reset_control_assert(host->pdata->rstc);
3334 usleep_range(10, 50);
3335 reset_control_deassert(host->pdata->rstc);
3338 if (drv_data && drv_data->init) {
3339 ret = drv_data->init(host);
3342 "implementation specific init failed\n");
3347 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3348 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3349 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3351 spin_lock_init(&host->lock);
3352 spin_lock_init(&host->irq_lock);
3353 INIT_LIST_HEAD(&host->queue);
3355 dw_mci_init_fault(host);
3358 * Get the host data width - this assumes that HCON has been set with
3359 * the correct values.
3361 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
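/*
 * HCON's data width field encodes the FIFO port width seen below:
 * 0 -> 16-bit, 1 -> 32-bit, 2 -> 64-bit; data_shift is log2 of the
 * access size in bytes, so n FIFO words carry n << data_shift bytes.
 */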
3363 host->push_data = dw_mci_push_data16;
3364 host->pull_data = dw_mci_pull_data16;
3366 host->data_shift = 1;
3367 } else if (i == 2) {
3368 host->push_data = dw_mci_push_data64;
3369 host->pull_data = dw_mci_pull_data64;
3371 host->data_shift = 3;
3373 /* Check for a reserved value, and warn if it is */
3375 "HCON reports a reserved host data width!\n"
3376 "Defaulting to 32-bit access.\n");
3377 host->push_data = dw_mci_push_data32;
3378 host->pull_data = dw_mci_pull_data32;
3380 host->data_shift = 2;
3383 /* Reset all blocks */
3384 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3389 host->dma_ops = host->pdata->dma_ops;
3390 dw_mci_init_dma(host);
3392 /* Clear the interrupts for the host controller */
3393 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3394 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3396 /* Put in max timeout */
3397 mci_writel(host, TMOUT, 0xFFFFFFFF);
* FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
* TX_WMark = fifo_size / 2, DMA size = 8
3403 if (!host->pdata->fifo_depth) {
3405 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3406 * have been overwritten by the bootloader, just like we're
3407 * about to do, so if you know the value for your hardware, you
3408 * should put it in the platform data.
3410 fifo_size = mci_readl(host, FIFOTH);
3411 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3413 fifo_size = host->pdata->fifo_depth;
3415 host->fifo_depth = fifo_size;
3417 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3418 mci_writel(host, FIFOTH, host->fifoth_val);
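/*
 * Example with a 32-word FIFO: RX_WMark = 32 / 2 - 1 = 15,
 * TX_WMark = 32 / 2 = 16, and the MSize field of 0x2 selects DMA
 * bursts of 8 transfers, matching the threshold comment above.
 */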
3420 /* disable clock to CIU */
3421 mci_writel(host, CLKENA, 0);
3422 mci_writel(host, CLKSRC, 0);
* The DATA register offset changed in the 2.40a spec, so check the
* version ID and set the FIFO data offset accordingly.
3428 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3429 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3431 if (host->data_addr_override)
3432 host->fifo_reg = host->regs + host->data_addr_override;
3433 else if (host->verid < DW_MMC_240A)
3434 host->fifo_reg = host->regs + DATA_OFFSET;
3436 host->fifo_reg = host->regs + DATA_240A_OFFSET;
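/*
 * For reference, dw_mmc.h defines DATA_OFFSET as 0x100 (pre-2.40a)
 * and DATA_240A_OFFSET as 0x200; the "data-addr" property parsed in
 * dw_mci_parse_dt() overrides both when present.
 */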
3438 tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
3439 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3440 host->irq_flags, "dw-mci", host);
3445 * Enable interrupts for command done, data over, data empty,
3446 * receive ready and error such as transmit, receive timeout, crc error
3448 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3449 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3450 DW_MCI_ERROR_FLAGS);
3451 /* Enable mci interrupt */
3452 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3455 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
3456 host->irq, width, fifo_size);
3458 /* We need at least one slot to succeed */
3459 ret = dw_mci_init_slot(host);
3461 dev_dbg(host->dev, "slot %d init failed\n", i);
3465 /* Now that slots are all setup, we can enable card detect */
3466 dw_mci_enable_cd(host);
3471 if (host->use_dma && host->dma_ops->exit)
3472 host->dma_ops->exit(host);
3474 reset_control_assert(host->pdata->rstc);
3477 clk_disable_unprepare(host->ciu_clk);
3480 clk_disable_unprepare(host->biu_clk);
3484 EXPORT_SYMBOL(dw_mci_probe);
3486 void dw_mci_remove(struct dw_mci *host)
3488 dev_dbg(host->dev, "remove slot\n");
3490 dw_mci_cleanup_slot(host->slot);
3492 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3493 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3495 /* disable clock to CIU */
3496 mci_writel(host, CLKENA, 0);
3497 mci_writel(host, CLKSRC, 0);
3499 if (host->use_dma && host->dma_ops->exit)
3500 host->dma_ops->exit(host);
3502 reset_control_assert(host->pdata->rstc);
3504 clk_disable_unprepare(host->ciu_clk);
3505 clk_disable_unprepare(host->biu_clk);
3507 EXPORT_SYMBOL(dw_mci_remove);
3512 int dw_mci_runtime_suspend(struct device *dev)
3514 struct dw_mci *host = dev_get_drvdata(dev);
3516 if (host->use_dma && host->dma_ops->exit)
3517 host->dma_ops->exit(host);
3519 clk_disable_unprepare(host->ciu_clk);
3522 (mmc_can_gpio_cd(host->slot->mmc) ||
3523 !mmc_card_is_removable(host->slot->mmc)))
3524 clk_disable_unprepare(host->biu_clk);
3528 EXPORT_SYMBOL(dw_mci_runtime_suspend);
3530 int dw_mci_runtime_resume(struct device *dev)
3533 struct dw_mci *host = dev_get_drvdata(dev);
3536 (mmc_can_gpio_cd(host->slot->mmc) ||
3537 !mmc_card_is_removable(host->slot->mmc))) {
3538 ret = clk_prepare_enable(host->biu_clk);
3543 ret = clk_prepare_enable(host->ciu_clk);
3547 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3548 clk_disable_unprepare(host->ciu_clk);
3553 if (host->use_dma && host->dma_ops->init)
3554 host->dma_ops->init(host);
* Restore the initial value of the FIFOTH register
* and invalidate prev_blksz by zeroing it
3560 mci_writel(host, FIFOTH, host->fifoth_val);
3561 host->prev_blksz = 0;
3563 /* Put in max timeout */
3564 mci_writel(host, TMOUT, 0xFFFFFFFF);
3566 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3567 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3568 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3569 DW_MCI_ERROR_FLAGS);
3570 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3573 if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3574 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3576 /* Force setup bus to guarantee available clock output */
3577 dw_mci_setup_bus(host->slot, true);
3579 /* Re-enable SDIO interrupts. */
3580 if (sdio_irq_claimed(host->slot->mmc))
3581 __dw_mci_enable_sdio_irq(host->slot, 1);
3583 /* Now that slots are all setup, we can enable card detect */
3584 dw_mci_enable_cd(host);
3590 (mmc_can_gpio_cd(host->slot->mmc) ||
3591 !mmc_card_is_removable(host->slot->mmc)))
3592 clk_disable_unprepare(host->biu_clk);
3596 EXPORT_SYMBOL(dw_mci_runtime_resume);
3597 #endif /* CONFIG_PM */
3599 static int __init dw_mci_init(void)
3601 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3605 static void __exit dw_mci_exit(void)
3609 module_init(dw_mci_init);
3610 module_exit(dw_mci_exit);
3612 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3613 MODULE_AUTHOR("NXP Semiconductor VietNam");
3614 MODULE_AUTHOR("Imagination Technologies Ltd");
3615 MODULE_LICENSE("GPL v2");