2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * Thanks to the following companies for their support:
13 * - JMicron (hardware and technical support)
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
19 #include <linux/module.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/slab.h>
22 #include <linux/scatterlist.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
26 #include <linux/leds.h>
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/sdio.h>
32 #include <linux/mmc/slot-gpio.h>
36 #define DRIVER_NAME "sdhci"
38 #define DBG(f, x...) \
39 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
41 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
42 defined(CONFIG_MMC_SDHCI_MODULE))
43 #define SDHCI_USE_LEDS_CLASS
46 #define MAX_TUNING_LOOP 40
48 static unsigned int debug_quirks = 0;
49 static unsigned int debug_quirks2;
51 static void sdhci_finish_data(struct sdhci_host *);
53 static void sdhci_finish_command(struct sdhci_host *);
54 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
56 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
57 struct mmc_data *data);
58 static int sdhci_do_get_cd(struct sdhci_host *host);
61 static int sdhci_runtime_pm_get(struct sdhci_host *host);
62 static int sdhci_runtime_pm_put(struct sdhci_host *host);
63 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
64 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
66 static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
70 static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
74 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
77 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
82 static void sdhci_dumpregs(struct sdhci_host *host)
84 pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
85 mmc_hostname(host->mmc));
87 pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
88 sdhci_readl(host, SDHCI_DMA_ADDRESS),
89 sdhci_readw(host, SDHCI_HOST_VERSION));
90 pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
91 sdhci_readw(host, SDHCI_BLOCK_SIZE),
92 sdhci_readw(host, SDHCI_BLOCK_COUNT));
93 pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
94 sdhci_readl(host, SDHCI_ARGUMENT),
95 sdhci_readw(host, SDHCI_TRANSFER_MODE));
96 pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
97 sdhci_readl(host, SDHCI_PRESENT_STATE),
98 sdhci_readb(host, SDHCI_HOST_CONTROL));
99 pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
100 sdhci_readb(host, SDHCI_POWER_CONTROL),
101 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
102 pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
103 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
104 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
105 pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
106 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
107 sdhci_readl(host, SDHCI_INT_STATUS));
108 pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
109 sdhci_readl(host, SDHCI_INT_ENABLE),
110 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
111 pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
112 sdhci_readw(host, SDHCI_ACMD12_ERR),
113 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
114 pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
115 sdhci_readl(host, SDHCI_CAPABILITIES),
116 sdhci_readl(host, SDHCI_CAPABILITIES_1));
117 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
118 sdhci_readw(host, SDHCI_COMMAND),
119 sdhci_readl(host, SDHCI_MAX_CURRENT));
120 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
121 sdhci_readw(host, SDHCI_HOST_CONTROL2));
123 if (host->flags & SDHCI_USE_ADMA) {
124 if (host->flags & SDHCI_USE_64_BIT_DMA)
125 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
126 readl(host->ioaddr + SDHCI_ADMA_ERROR),
127 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
128 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
130 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
131 readl(host->ioaddr + SDHCI_ADMA_ERROR),
132 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
135 pr_debug(DRIVER_NAME ": ===========================================\n");
138 /*****************************************************************************\
140 * Low level functions *
142 \*****************************************************************************/
144 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
148 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
149 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
153 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
156 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
157 SDHCI_INT_CARD_INSERT;
159 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
162 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
163 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
166 static void sdhci_enable_card_detection(struct sdhci_host *host)
168 sdhci_set_card_detection(host, true);
171 static void sdhci_disable_card_detection(struct sdhci_host *host)
173 sdhci_set_card_detection(host, false);
176 void sdhci_reset(struct sdhci_host *host, u8 mask)
178 unsigned long timeout;
180 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
182 if (mask & SDHCI_RESET_ALL) {
184 /* Reset-all turns off SD Bus Power */
185 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
186 sdhci_runtime_pm_bus_off(host);
189 /* Wait max 100 ms */
192 /* hw clears the bit when it's done */
193 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
195 pr_err("%s: Reset 0x%x never completed.\n",
196 mmc_hostname(host->mmc), (int)mask);
197 sdhci_dumpregs(host);
204 EXPORT_SYMBOL_GPL(sdhci_reset);
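/*
 * Illustrative sketch (not part of the driver): sdhci_reset() above polls
 * SDHCI_SOFTWARE_RESET until the hardware clears the requested bits, giving
 * up after roughly 100 ms.  The stand-alone program below mirrors only that
 * polling pattern against a simulated register; the helper names and the
 * fake "busy for N polls" behaviour are assumptions made for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t sw_reset_reg;		/* simulated SDHCI_SOFTWARE_RESET */
static unsigned int busy_polls;		/* how long the fake hardware stays busy */

static uint8_t read_reset_reg(void)
{
	if (busy_polls) {
		busy_polls--;
		return sw_reset_reg;	/* still resetting */
	}
	sw_reset_reg = 0;		/* hardware clears the bits when done */
	return 0;
}

static int wait_for_reset(uint8_t mask)
{
	unsigned int timeout = 100;	/* wait max ~100 ms, one poll per ms */

	while (read_reset_reg() & mask) {
		if (!timeout--) {
			fprintf(stderr, "Reset 0x%x never completed\n", mask);
			return -1;
		}
		/* a real driver would delay ~1 ms here before polling again */
	}
	return 0;
}

int main(void)
{
	sw_reset_reg = 0x01;		/* pretend a reset was requested */
	busy_polls = 5;
	printf("reset %s\n", wait_for_reset(0x01) ? "timed out" : "completed");
	return 0;
}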
206 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
208 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
209 if (!sdhci_do_get_cd(host))
213 host->ops->reset(host, mask);
215 if (mask & SDHCI_RESET_ALL) {
216 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
217 if (host->ops->enable_dma)
218 host->ops->enable_dma(host);
221 /* Resetting the controller clears many */
222 host->preset_enabled = false;
226 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
228 static void sdhci_init(struct sdhci_host *host, int soft)
231 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
233 sdhci_do_reset(host, SDHCI_RESET_ALL);
235 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
236 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
237 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
238 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
241 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
242 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
245 /* force clock reconfiguration */
247 sdhci_set_ios(host->mmc, &host->mmc->ios);
251 static void sdhci_reinit(struct sdhci_host *host)
254 sdhci_enable_card_detection(host);
257 static void sdhci_activate_led(struct sdhci_host *host)
261 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
262 ctrl |= SDHCI_CTRL_LED;
263 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
266 static void sdhci_deactivate_led(struct sdhci_host *host)
270 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
271 ctrl &= ~SDHCI_CTRL_LED;
272 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
275 #ifdef SDHCI_USE_LEDS_CLASS
276 static void sdhci_led_control(struct led_classdev *led,
277 enum led_brightness brightness)
279 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
282 spin_lock_irqsave(&host->lock, flags);
284 if (host->runtime_suspended)
287 if (brightness == LED_OFF)
288 sdhci_deactivate_led(host);
290 sdhci_activate_led(host);
292 spin_unlock_irqrestore(&host->lock, flags);
296 /*****************************************************************************\
300 \*****************************************************************************/
302 static void sdhci_read_block_pio(struct sdhci_host *host)
305 size_t blksize, len, chunk;
306 u32 uninitialized_var(scratch);
309 DBG("PIO reading\n");
311 blksize = host->data->blksz;
314 local_irq_save(flags);
317 BUG_ON(!sg_miter_next(&host->sg_miter));
319 len = min(host->sg_miter.length, blksize);
322 host->sg_miter.consumed = len;
324 buf = host->sg_miter.addr;
328 scratch = sdhci_readl(host, SDHCI_BUFFER);
332 *buf = scratch & 0xFF;
341 sg_miter_stop(&host->sg_miter);
343 local_irq_restore(flags);
346 static void sdhci_write_block_pio(struct sdhci_host *host)
349 size_t blksize, len, chunk;
353 DBG("PIO writing\n");
355 blksize = host->data->blksz;
359 local_irq_save(flags);
362 BUG_ON(!sg_miter_next(&host->sg_miter));
364 len = min(host->sg_miter.length, blksize);
367 host->sg_miter.consumed = len;
369 buf = host->sg_miter.addr;
372 scratch |= (u32)*buf << (chunk * 8);
378 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
379 sdhci_writel(host, scratch, SDHCI_BUFFER);
386 sg_miter_stop(&host->sg_miter);
388 local_irq_restore(flags);
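/*
 * Illustrative sketch (not part of the driver): the PIO write path above
 * collects bytes into a 32-bit scratch word and pushes it to the BUFFER
 * register once four bytes have accumulated (or the block ends).  The
 * stand-alone version below shows the same byte packing; write_buffer_reg()
 * is a hypothetical stand-in for sdhci_writel(host, scratch, SDHCI_BUFFER).
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static void write_buffer_reg(uint32_t word)
{
	printf("BUFFER <- 0x%08x\n", word);
}

static void pio_write_block(const uint8_t *buf, size_t blksize)
{
	uint32_t scratch = 0;
	unsigned int chunk = 0;

	while (blksize) {
		scratch |= (uint32_t)*buf << (chunk * 8);  /* little-endian packing */
		buf++;
		chunk++;
		blksize--;

		if (chunk == 4 || blksize == 0) {          /* full word or end of block */
			write_buffer_reg(scratch);
			chunk = 0;
			scratch = 0;
		}
	}
}

int main(void)
{
	const uint8_t block[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	pio_write_block(block, sizeof(block));
	return 0;
}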
391 static void sdhci_transfer_pio(struct sdhci_host *host)
397 if (host->blocks == 0)
400 if (host->data->flags & MMC_DATA_READ)
401 mask = SDHCI_DATA_AVAILABLE;
403 mask = SDHCI_SPACE_AVAILABLE;
406 * Some controllers (JMicron JMB38x) mess up the buffer bits
407 * for transfers < 4 bytes. As long as it is just one block,
408 * we can ignore the bits.
410 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
411 (host->data->blocks == 1))
414 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
415 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
418 if (host->data->flags & MMC_DATA_READ)
419 sdhci_read_block_pio(host);
421 sdhci_write_block_pio(host);
424 if (host->blocks == 0)
428 DBG("PIO transfer complete.\n");
431 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
433 local_irq_save(*flags);
434 return kmap_atomic(sg_page(sg)) + sg->offset;
437 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
439 kunmap_atomic(buffer);
440 local_irq_restore(*flags);
443 static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
444 dma_addr_t addr, int len, unsigned cmd)
446 struct sdhci_adma2_64_desc *dma_desc = desc;
448 /* 32-bit and 64-bit descriptors have these members in same position */
449 dma_desc->cmd = cpu_to_le16(cmd);
450 dma_desc->len = cpu_to_le16(len);
451 dma_desc->addr_lo = cpu_to_le32((u32)addr);
453 if (host->flags & SDHCI_USE_64_BIT_DMA)
454 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
457 static void sdhci_adma_mark_end(void *desc)
459 struct sdhci_adma2_64_desc *dma_desc = desc;
461 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
462 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
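/*
 * Illustrative sketch (not part of the driver): layout of an ADMA2
 * descriptor as filled in by sdhci_adma_write_desc() and terminated by
 * sdhci_adma_mark_end() above.  This is a userspace mock-up: the real
 * driver stores the fields with cpu_to_le16()/cpu_to_le32(), and the
 * attribute values below are assumptions made for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ADMA2_TRAN_VALID	0x21	/* "transfer data", valid */
#define EX_ADMA2_END		0x02	/* last descriptor in the table */

struct ex_adma2_64_desc {
	uint16_t cmd;		/* attribute bits: valid, end, act */
	uint16_t len;		/* transfer length in bytes */
	uint32_t addr_lo;	/* low 32 bits of the DMA address */
	uint32_t addr_hi;	/* high 32 bits (64-bit ADMA only) */
};

static void ex_write_desc(struct ex_adma2_64_desc *d, uint64_t addr,
			  uint16_t len, uint16_t cmd)
{
	d->cmd = cmd;			/* kernel: cpu_to_le16(cmd) */
	d->len = len;			/* kernel: cpu_to_le16(len) */
	d->addr_lo = (uint32_t)addr;	/* kernel: cpu_to_le32(...) */
	d->addr_hi = (uint32_t)(addr >> 32);
}

int main(void)
{
	struct ex_adma2_64_desc d;

	ex_write_desc(&d, 0x12345678abcdULL, 512, EX_ADMA2_TRAN_VALID);
	d.cmd |= EX_ADMA2_END;		/* mark the final descriptor */
	printf("cmd=0x%04x len=%u addr=0x%08x%08x\n",
	       (unsigned)d.cmd, (unsigned)d.len,
	       (unsigned)d.addr_hi, (unsigned)d.addr_lo);
	return 0;
}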
465 static int sdhci_adma_table_pre(struct sdhci_host *host,
466 struct mmc_data *data)
473 dma_addr_t align_addr;
476 struct scatterlist *sg;
482 * The spec does not specify endianness of descriptor table.
483 * We currently guess that it is LE.
486 if (data->flags & MMC_DATA_READ)
487 direction = DMA_FROM_DEVICE;
489 direction = DMA_TO_DEVICE;
491 host->align_addr = dma_map_single(mmc_dev(host->mmc),
492 host->align_buffer, host->align_buffer_sz, direction);
493 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
495 BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
497 host->sg_count = sdhci_pre_dma_transfer(host, data);
498 if (host->sg_count < 0)
501 desc = host->adma_table;
502 align = host->align_buffer;
504 align_addr = host->align_addr;
506 for_each_sg(data->sg, sg, host->sg_count, i) {
507 addr = sg_dma_address(sg);
508 len = sg_dma_len(sg);
511 * The SDHCI specification states that ADMA
512 * addresses must be 32-bit aligned. If they
513 * aren't, then we use a bounce buffer for
514 * the (up to three) bytes that screw up the alignment.
517 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
520 if (data->flags & MMC_DATA_WRITE) {
521 buffer = sdhci_kmap_atomic(sg, &flags);
522 memcpy(align, buffer, offset);
523 sdhci_kunmap_atomic(buffer, &flags);
527 sdhci_adma_write_desc(host, desc, align_addr, offset,
530 BUG_ON(offset > 65536);
532 align += SDHCI_ADMA2_ALIGN;
533 align_addr += SDHCI_ADMA2_ALIGN;
535 desc += host->desc_sz;
545 sdhci_adma_write_desc(host, desc, addr, len,
547 desc += host->desc_sz;
551 * If this triggers then we have a calculation bug somewhere.
554 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
557 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
559 * Mark the last descriptor as the terminating descriptor
561 if (desc != host->adma_table) {
562 desc -= host->desc_sz;
563 sdhci_adma_mark_end(desc);
567 * Add a terminating entry.
570 /* nop, end, valid */
571 sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
575 * Resync align buffer as we might have changed it.
577 if (data->flags & MMC_DATA_WRITE) {
578 dma_sync_single_for_device(mmc_dev(host->mmc),
579 host->align_addr, host->align_buffer_sz, direction);
585 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
586 host->align_buffer_sz, direction);
591 static void sdhci_adma_table_post(struct sdhci_host *host,
592 struct mmc_data *data)
596 struct scatterlist *sg;
603 if (data->flags & MMC_DATA_READ)
604 direction = DMA_FROM_DEVICE;
606 direction = DMA_TO_DEVICE;
608 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
609 host->align_buffer_sz, direction);
611 /* Do a quick scan of the SG list for any unaligned mappings */
612 has_unaligned = false;
613 for_each_sg(data->sg, sg, host->sg_count, i)
614 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
615 has_unaligned = true;
619 if (has_unaligned && data->flags & MMC_DATA_READ) {
620 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
621 data->sg_len, direction);
623 align = host->align_buffer;
625 for_each_sg(data->sg, sg, host->sg_count, i) {
626 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
627 size = SDHCI_ADMA2_ALIGN -
628 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
630 buffer = sdhci_kmap_atomic(sg, &flags);
631 memcpy(buffer, align, size);
632 sdhci_kunmap_atomic(buffer, &flags);
634 align += SDHCI_ADMA2_ALIGN;
639 if (data->host_cookie == COOKIE_MAPPED) {
640 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
641 data->sg_len, direction);
642 data->host_cookie = COOKIE_UNMAPPED;
646 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
649 struct mmc_data *data = cmd->data;
650 unsigned target_timeout, current_timeout;
653 * If the host controller provides us with an incorrect timeout
654 * value, just skip the check and use 0xE. The hardware may take
655 * longer to time out, but that's much better than having a too-short
658 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
661 /* Unspecified timeout, assume max */
662 if (!data && !cmd->busy_timeout)
667 target_timeout = cmd->busy_timeout * 1000;
669 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
670 if (host->clock && data->timeout_clks) {
671 unsigned long long val;
674 * data->timeout_clks is in units of clock cycles.
675 * host->clock is in Hz. target_timeout is in us.
676 * Hence, us = 1000000 * cycles / Hz. Round up.
678 val = 1000000ULL * data->timeout_clks;
679 if (do_div(val, host->clock))
681 target_timeout += val;
686 * Figure out needed cycles.
687 * We do this in steps in order to fit inside a 32 bit int.
688 * The first step is the minimum timeout, which will have a
689 * minimum resolution of 6 bits:
690 * (1) 2^13*1000 > 2^22,
691 * (2) host->timeout_clk < 2^16, so (1) / (2) > 2^6
696 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
697 while (current_timeout < target_timeout) {
699 current_timeout <<= 1;
705 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
706 mmc_hostname(host->mmc), count, cmd->opcode);
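/*
 * Illustrative sketch (not part of the driver): sdhci_calc_timeout() above
 * converts a target timeout in microseconds into the 4-bit value written to
 * SDHCI_TIMEOUT_CONTROL, where the hardware timeout is 2^(13 + N) cycles of
 * the timeout clock (in kHz).  Stand-alone version of the doubling loop;
 * the function and parameter names here are the example's own.
 */
#include <stdio.h>

static unsigned char calc_timeout_count(unsigned int target_us,
					 unsigned int timeout_clk_khz)
{
	unsigned char count = 0;
	/* smallest representable timeout: 2^13 cycles, expressed in us */
	unsigned int current_us = (1 << 13) * 1000 / timeout_clk_khz;

	while (current_us < target_us) {
		count++;
		current_us <<= 1;	/* each step doubles the hw timeout */
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF)
		count = 0xE;		/* cap at the largest usable value */
	return count;
}

int main(void)
{
	/* e.g. a 250 ms target with a 50 MHz (50000 kHz) timeout clock */
	printf("TOUT = 0x%x\n", (unsigned)calc_timeout_count(250000, 50000));
	return 0;
}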
713 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
715 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
716 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
718 if (host->flags & SDHCI_REQ_USE_DMA)
719 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
721 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
723 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
724 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
727 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
731 if (host->ops->set_timeout) {
732 host->ops->set_timeout(host, cmd);
734 count = sdhci_calc_timeout(host, cmd);
735 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
739 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
742 struct mmc_data *data = cmd->data;
747 if (data || (cmd->flags & MMC_RSP_BUSY))
748 sdhci_set_timeout(host, cmd);
754 BUG_ON(data->blksz * data->blocks > 524288);
755 BUG_ON(data->blksz > host->mmc->max_blk_size);
756 BUG_ON(data->blocks > 65535);
759 host->data_early = 0;
760 host->data->bytes_xfered = 0;
762 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
763 host->flags |= SDHCI_REQ_USE_DMA;
766 * FIXME: This doesn't account for merging when mapping the scatterlist.
769 if (host->flags & SDHCI_REQ_USE_DMA) {
771 struct scatterlist *sg;
774 if (host->flags & SDHCI_USE_ADMA) {
775 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
778 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
782 if (unlikely(broken)) {
783 for_each_sg(data->sg, sg, data->sg_len, i) {
784 if (sg->length & 0x3) {
785 DBG("Reverting to PIO because of "
786 "transfer size (%d)\n",
788 host->flags &= ~SDHCI_REQ_USE_DMA;
796 * The assumption here being that alignment is the same after
797 * translation to device address space.
799 if (host->flags & SDHCI_REQ_USE_DMA) {
801 struct scatterlist *sg;
804 if (host->flags & SDHCI_USE_ADMA) {
806 * As we use 3 byte chunks to work around
807 * alignment problems, we need to check this quirk.
810 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
813 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
817 if (unlikely(broken)) {
818 for_each_sg(data->sg, sg, data->sg_len, i) {
819 if (sg->offset & 0x3) {
820 DBG("Reverting to PIO because of bad alignment\n");
822 host->flags &= ~SDHCI_REQ_USE_DMA;
829 if (host->flags & SDHCI_REQ_USE_DMA) {
830 if (host->flags & SDHCI_USE_ADMA) {
831 ret = sdhci_adma_table_pre(host, data);
834 * This only happens when someone fed
835 * us an invalid request.
838 host->flags &= ~SDHCI_REQ_USE_DMA;
840 sdhci_writel(host, host->adma_addr,
842 if (host->flags & SDHCI_USE_64_BIT_DMA)
844 (u64)host->adma_addr >> 32,
845 SDHCI_ADMA_ADDRESS_HI);
850 sg_cnt = sdhci_pre_dma_transfer(host, data);
853 * This only happens when someone fed
854 * us an invalid request.
857 host->flags &= ~SDHCI_REQ_USE_DMA;
859 WARN_ON(sg_cnt != 1);
860 sdhci_writel(host, sg_dma_address(data->sg),
867 * Always adjust the DMA selection as some controllers
868 * (e.g. JMicron) can't do PIO properly when the selection is ADMA.
871 if (host->version >= SDHCI_SPEC_200) {
872 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
873 ctrl &= ~SDHCI_CTRL_DMA_MASK;
874 if ((host->flags & SDHCI_REQ_USE_DMA) &&
875 (host->flags & SDHCI_USE_ADMA)) {
876 if (host->flags & SDHCI_USE_64_BIT_DMA)
877 ctrl |= SDHCI_CTRL_ADMA64;
879 ctrl |= SDHCI_CTRL_ADMA32;
881 ctrl |= SDHCI_CTRL_SDMA;
883 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
886 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
889 flags = SG_MITER_ATOMIC;
890 if (host->data->flags & MMC_DATA_READ)
891 flags |= SG_MITER_TO_SG;
893 flags |= SG_MITER_FROM_SG;
894 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
895 host->blocks = data->blocks;
898 sdhci_set_transfer_irqs(host);
900 /* Set the DMA boundary value and block size */
901 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
902 data->blksz), SDHCI_BLOCK_SIZE);
903 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
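/*
 * Illustrative sketch (not part of the driver): SDHCI_MAKE_BLKSZ() used just
 * above packs the SDMA buffer boundary (bits 14:12) and the block size
 * (bits 11:0) into the 16-bit BLOCK_SIZE register.  The macro below mirrors
 * that encoding; SDHCI_DEFAULT_BOUNDARY_ARG being 7 (a 512 KiB boundary) is
 * quoted from the driver headers and should be treated as an assumption here.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MAKE_BLKSZ(dma, blksz)  ((((dma) & 0x7) << 12) | ((blksz) & 0xFFF))

int main(void)
{
	uint16_t reg = EX_MAKE_BLKSZ(7, 512);	/* 512 KiB boundary, 512-byte blocks */

	printf("BLOCK_SIZE = 0x%04x\n", (unsigned)reg);	/* prints 0x7200 */
	return 0;
}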
906 static void sdhci_set_transfer_mode(struct sdhci_host *host,
907 struct mmc_command *cmd)
910 struct mmc_data *data = cmd->data;
914 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
915 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
917 /* clear Auto CMD settings for no data CMDs */
918 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
919 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
920 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
925 WARN_ON(!host->data);
927 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
928 mode = SDHCI_TRNS_BLK_CNT_EN;
930 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
931 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
933 * If we are sending CMD23, CMD12 never gets sent
934 * on successful completion (so no Auto-CMD12).
936 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
937 (cmd->opcode != SD_IO_RW_EXTENDED))
938 mode |= SDHCI_TRNS_AUTO_CMD12;
939 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
940 mode |= SDHCI_TRNS_AUTO_CMD23;
941 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
945 if (data->flags & MMC_DATA_READ)
946 mode |= SDHCI_TRNS_READ;
947 if (host->flags & SDHCI_REQ_USE_DMA)
948 mode |= SDHCI_TRNS_DMA;
950 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
953 static void sdhci_finish_data(struct sdhci_host *host)
955 struct mmc_data *data;
962 if (host->flags & SDHCI_REQ_USE_DMA) {
963 if (host->flags & SDHCI_USE_ADMA)
964 sdhci_adma_table_post(host, data);
966 if (data->host_cookie == COOKIE_MAPPED) {
967 dma_unmap_sg(mmc_dev(host->mmc),
968 data->sg, data->sg_len,
969 (data->flags & MMC_DATA_READ) ?
970 DMA_FROM_DEVICE : DMA_TO_DEVICE);
971 data->host_cookie = COOKIE_UNMAPPED;
977 * The specification states that the block count register must
978 * be updated, but it does not specify at what point in the
979 * data flow. That makes the register entirely useless to read
980 * back so we have to assume that nothing made it to the card
981 * in the event of an error.
984 data->bytes_xfered = 0;
986 data->bytes_xfered = data->blksz * data->blocks;
989 * Need to send CMD12 if -
990 * a) open-ended multiblock transfer (no CMD23)
991 * b) error in multiblock transfer
998 * The controller needs a reset of internal state machines
999 * upon error conditions.
1002 sdhci_do_reset(host, SDHCI_RESET_CMD);
1003 sdhci_do_reset(host, SDHCI_RESET_DATA);
1006 sdhci_send_command(host, data->stop);
1008 tasklet_schedule(&host->finish_tasklet);
1011 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1015 unsigned long timeout;
1019 /* Wait max 10 ms */
1022 mask = SDHCI_CMD_INHIBIT;
1023 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
1024 mask |= SDHCI_DATA_INHIBIT;
1026 /* We shouldn't wait for data inhibit for stop commands, even
1027 though they might use busy signaling */
1028 if (host->mrq->data && (cmd == host->mrq->data->stop))
1029 mask &= ~SDHCI_DATA_INHIBIT;
1031 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1033 pr_err("%s: Controller never released "
1034 "inhibit bit(s).\n", mmc_hostname(host->mmc));
1035 sdhci_dumpregs(host);
1037 tasklet_schedule(&host->finish_tasklet);
1045 if (!cmd->data && cmd->busy_timeout > 9000)
1046 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1049 mod_timer(&host->timer, timeout);
1052 host->busy_handle = 0;
1054 sdhci_prepare_data(host, cmd);
1056 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1058 sdhci_set_transfer_mode(host, cmd);
1060 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1061 pr_err("%s: Unsupported response type!\n",
1062 mmc_hostname(host->mmc));
1063 cmd->error = -EINVAL;
1064 tasklet_schedule(&host->finish_tasklet);
1068 if (!(cmd->flags & MMC_RSP_PRESENT))
1069 flags = SDHCI_CMD_RESP_NONE;
1070 else if (cmd->flags & MMC_RSP_136)
1071 flags = SDHCI_CMD_RESP_LONG;
1072 else if (cmd->flags & MMC_RSP_BUSY)
1073 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1075 flags = SDHCI_CMD_RESP_SHORT;
1077 if (cmd->flags & MMC_RSP_CRC)
1078 flags |= SDHCI_CMD_CRC;
1079 if (cmd->flags & MMC_RSP_OPCODE)
1080 flags |= SDHCI_CMD_INDEX;
1082 /* CMD19 is special in that the Data Present Select should be set */
1083 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1084 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1085 flags |= SDHCI_CMD_DATA;
1087 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1089 EXPORT_SYMBOL_GPL(sdhci_send_command);
1091 static void sdhci_finish_command(struct sdhci_host *host)
1095 BUG_ON(host->cmd == NULL);
1097 if (host->cmd->flags & MMC_RSP_PRESENT) {
1098 if (host->cmd->flags & MMC_RSP_136) {
1099 /* CRC is stripped so we need to do some shifting. */
1100 for (i = 0;i < 4;i++) {
1101 host->cmd->resp[i] = sdhci_readl(host,
1102 SDHCI_RESPONSE + (3-i)*4) << 8;
1104 host->cmd->resp[i] |=
1106 SDHCI_RESPONSE + (3-i)*4-1);
1109 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1113 host->cmd->error = 0;
1115 /* Finished CMD23, now send actual command. */
1116 if (host->cmd == host->mrq->sbc) {
1118 sdhci_send_command(host, host->mrq->cmd);
1121 /* Processed actual command. */
1122 if (host->data && host->data_early)
1123 sdhci_finish_data(host);
1125 if (!host->cmd->data)
1126 tasklet_schedule(&host->finish_tasklet);
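/*
 * Illustrative sketch (not part of the driver): reconstruction of a 136-bit
 * (R2) response from the four 32-bit RESPONSE registers, as done in
 * sdhci_finish_command() above.  Because the controller strips the CRC byte,
 * each word is shifted left by 8 and the missing byte is borrowed from the
 * neighbouring register.  The register file here is a plain array standing
 * in for sdhci_readl()/sdhci_readb(), and little-endian access is assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t resp_regs[16];	/* simulated SDHCI_RESPONSE register file */

static uint32_t rd_l(int off)
{
	uint32_t v;

	memcpy(&v, resp_regs + off, 4);	/* assumes a little-endian host */
	return v;
}

static uint8_t rd_b(int off)
{
	return resp_regs[off];
}

int main(void)
{
	uint32_t resp[4];
	int i;

	for (i = 0; i < 16; i++)
		resp_regs[i] = (uint8_t)i;	/* arbitrary test pattern */

	for (i = 0; i < 4; i++) {
		resp[i] = rd_l((3 - i) * 4) << 8;
		if (i != 3)			/* lowest word has no neighbour */
			resp[i] |= rd_b((3 - i) * 4 - 1);
	}

	for (i = 0; i < 4; i++)
		printf("resp[%d] = 0x%08x\n", i, resp[i]);
	return 0;
}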
1132 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1136 switch (host->timing) {
1137 case MMC_TIMING_MMC_HS:
1138 case MMC_TIMING_SD_HS:
1139 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1141 case MMC_TIMING_UHS_SDR12:
1142 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1144 case MMC_TIMING_UHS_SDR25:
1145 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1147 case MMC_TIMING_UHS_SDR50:
1148 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1150 case MMC_TIMING_UHS_SDR104:
1151 case MMC_TIMING_MMC_HS200:
1152 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1154 case MMC_TIMING_UHS_DDR50:
1155 case MMC_TIMING_MMC_DDR52:
1156 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1158 case MMC_TIMING_MMC_HS400:
1159 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1162 pr_warn("%s: Invalid UHS-I mode selected\n",
1163 mmc_hostname(host->mmc));
1164 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1170 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1172 int div = 0; /* Initialized for compiler warning */
1173 int real_div = div, clk_mul = 1;
1175 unsigned long timeout;
1176 bool switch_base_clk = false;
1178 host->mmc->actual_clock = 0;
1180 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1181 if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
1187 if (host->version >= SDHCI_SPEC_300) {
1188 if (host->preset_enabled) {
1191 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1192 pre_val = sdhci_get_preset_value(host);
1193 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1194 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1195 if (host->clk_mul &&
1196 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1197 clk = SDHCI_PROG_CLOCK_MODE;
1199 clk_mul = host->clk_mul;
1201 real_div = max_t(int, 1, div << 1);
1207 * Check if the Host Controller supports Programmable Clock Mode.
1210 if (host->clk_mul) {
1211 for (div = 1; div <= 1024; div++) {
1212 if ((host->max_clk * host->clk_mul / div)
1216 if ((host->max_clk * host->clk_mul / div) <= clock) {
1218 * Set Programmable Clock Mode in the Clock Control register.
1221 clk = SDHCI_PROG_CLOCK_MODE;
1223 clk_mul = host->clk_mul;
1227 * Divisor can be too small to reach clock
1228 * speed requirement. Then use the base clock.
1230 switch_base_clk = true;
1234 if (!host->clk_mul || switch_base_clk) {
1235 /* Version 3.00 divisors must be a multiple of 2. */
1236 if (host->max_clk <= clock)
1239 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1241 if ((host->max_clk / div) <= clock)
1247 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1248 && !div && host->max_clk <= 25000000)
1252 /* Version 2.00 divisors must be a power of 2. */
1253 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1254 if ((host->max_clk / div) <= clock)
1263 host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
1264 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1265 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1266 << SDHCI_DIVIDER_HI_SHIFT;
1267 clk |= SDHCI_CLOCK_INT_EN;
1268 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1270 /* Wait max 20 ms */
1272 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1273 & SDHCI_CLOCK_INT_STABLE)) {
1275 pr_err("%s: Internal clock never "
1276 "stabilised.\n", mmc_hostname(host->mmc));
1277 sdhci_dumpregs(host);
1281 spin_unlock_irq(&host->lock);
1282 usleep_range(900, 1100);
1283 spin_lock_irq(&host->lock);
1286 clk |= SDHCI_CLOCK_CARD_EN;
1287 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1289 EXPORT_SYMBOL_GPL(sdhci_set_clock);
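/*
 * Illustrative sketch (not part of the driver): the 10-bit divided-clock
 * calculation used by sdhci_set_clock() above for v3.00 hosts when the
 * programmable clock multiplier is not in use.  The divisor must be an even
 * value up to 2046 (SDHCI_MAX_DIV_SPEC_300) and the register field holds
 * div/2.  Stand-alone version; the CLOCK_CONTROL bit layout and the clock
 * enable/stabilise sequence are omitted here.
 */
#include <stdio.h>

#define EX_MAX_DIV_SPEC_300	2046

static unsigned int pick_divisor(unsigned int max_clk, unsigned int clock)
{
	unsigned int div;

	if (max_clk <= clock)
		return 1;			/* base clock is slow enough already */

	for (div = 2; div < EX_MAX_DIV_SPEC_300; div += 2)
		if (max_clk / div <= clock)
			break;
	return div;
}

int main(void)
{
	unsigned int max_clk = 200000000;	/* 200 MHz base clock */
	unsigned int want = 25000000;		/* 25 MHz card clock */
	unsigned int div = pick_divisor(max_clk, want);

	printf("divisor %u -> actual clock %u Hz (register field %u)\n",
	       div, max_clk / div, div / 2);
	return 0;
}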
1291 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1294 struct mmc_host *mmc = host->mmc;
1296 spin_unlock_irq(&host->lock);
1297 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1298 spin_lock_irq(&host->lock);
1300 if (mode != MMC_POWER_OFF)
1301 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1303 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1306 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1311 if (mode != MMC_POWER_OFF) {
1313 case MMC_VDD_165_195:
1314 pwr = SDHCI_POWER_180;
1318 pwr = SDHCI_POWER_300;
1322 pwr = SDHCI_POWER_330;
1325 WARN(1, "%s: Invalid vdd %#x\n",
1326 mmc_hostname(host->mmc), vdd);
1331 if (host->pwr == pwr)
1337 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1338 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1339 sdhci_runtime_pm_bus_off(host);
1342 * Spec says that we should clear the power reg before setting
1343 * a new value. Some controllers don't seem to like this though.
1345 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1346 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1349 * At least the Marvell CaFe chip gets confused if we set the
1350 * voltage and turn on power at the same time, so set the voltage first.
1353 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1354 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1356 pwr |= SDHCI_POWER_ON;
1358 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1360 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1361 sdhci_runtime_pm_bus_on(host);
1364 * Some controllers need an extra 10ms delay before
1365 * they can apply clock after applying power
1367 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1371 EXPORT_SYMBOL_GPL(sdhci_set_power);
1373 static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1376 struct mmc_host *mmc = host->mmc;
1378 if (host->ops->set_power)
1379 host->ops->set_power(host, mode, vdd);
1380 else if (!IS_ERR(mmc->supply.vmmc))
1381 sdhci_set_power_reg(host, mode, vdd);
1383 sdhci_set_power(host, mode, vdd);
1386 /*****************************************************************************\
1390 \*****************************************************************************/
1392 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1394 struct sdhci_host *host;
1396 unsigned long flags;
1398 host = mmc_priv(mmc);
1400 sdhci_runtime_pm_get(host);
1402 /* Firstly check card presence */
1403 present = mmc->ops->get_cd(mmc);
1405 spin_lock_irqsave(&host->lock, flags);
1407 WARN_ON(host->mrq != NULL);
1409 #ifndef SDHCI_USE_LEDS_CLASS
1410 sdhci_activate_led(host);
1414 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1415 * requests if Auto-CMD12 is enabled.
1417 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1419 mrq->data->stop = NULL;
1426 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1427 host->mrq->cmd->error = -ENOMEDIUM;
1428 tasklet_schedule(&host->finish_tasklet);
1430 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1431 sdhci_send_command(host, mrq->sbc);
1433 sdhci_send_command(host, mrq->cmd);
1437 spin_unlock_irqrestore(&host->lock, flags);
1440 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1444 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1445 if (width == MMC_BUS_WIDTH_8) {
1446 ctrl &= ~SDHCI_CTRL_4BITBUS;
1447 if (host->version >= SDHCI_SPEC_300)
1448 ctrl |= SDHCI_CTRL_8BITBUS;
1450 if (host->version >= SDHCI_SPEC_300)
1451 ctrl &= ~SDHCI_CTRL_8BITBUS;
1452 if (width == MMC_BUS_WIDTH_4)
1453 ctrl |= SDHCI_CTRL_4BITBUS;
1455 ctrl &= ~SDHCI_CTRL_4BITBUS;
1457 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1459 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1461 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1465 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1466 /* Select Bus Speed Mode for host */
1467 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1468 if ((timing == MMC_TIMING_MMC_HS200) ||
1469 (timing == MMC_TIMING_UHS_SDR104))
1470 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1471 else if (timing == MMC_TIMING_UHS_SDR12)
1472 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1473 else if (timing == MMC_TIMING_UHS_SDR25)
1474 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1475 else if (timing == MMC_TIMING_UHS_SDR50)
1476 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1477 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1478 (timing == MMC_TIMING_MMC_DDR52))
1479 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1480 else if (timing == MMC_TIMING_MMC_HS400)
1481 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1482 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1484 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1486 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1488 unsigned long flags;
1490 struct mmc_host *mmc = host->mmc;
1492 spin_lock_irqsave(&host->lock, flags);
1494 if (host->flags & SDHCI_DEVICE_DEAD) {
1495 spin_unlock_irqrestore(&host->lock, flags);
1496 if (!IS_ERR(mmc->supply.vmmc) &&
1497 ios->power_mode == MMC_POWER_OFF)
1498 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1503 * Reset the chip on each power off.
1504 * Should clear out any weird states.
1506 if (ios->power_mode == MMC_POWER_OFF) {
1507 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1511 if (host->version >= SDHCI_SPEC_300 &&
1512 (ios->power_mode == MMC_POWER_UP) &&
1513 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1514 sdhci_enable_preset_value(host, false);
1516 if (!ios->clock || ios->clock != host->clock) {
1517 host->ops->set_clock(host, ios->clock);
1518 host->clock = ios->clock;
1520 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1522 host->timeout_clk = host->mmc->actual_clock ?
1523 host->mmc->actual_clock / 1000 :
1525 host->mmc->max_busy_timeout =
1526 host->ops->get_max_timeout_count ?
1527 host->ops->get_max_timeout_count(host) :
1529 host->mmc->max_busy_timeout /= host->timeout_clk;
1533 __sdhci_set_power(host, ios->power_mode, ios->vdd);
1535 if (host->ops->platform_send_init_74_clocks)
1536 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1538 host->ops->set_bus_width(host, ios->bus_width);
1540 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1542 if ((ios->timing == MMC_TIMING_SD_HS ||
1543 ios->timing == MMC_TIMING_MMC_HS)
1544 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1545 ctrl |= SDHCI_CTRL_HISPD;
1547 ctrl &= ~SDHCI_CTRL_HISPD;
1549 if (host->version >= SDHCI_SPEC_300) {
1552 /* In case of UHS-I modes, set High Speed Enable */
1553 if ((ios->timing == MMC_TIMING_MMC_HS400) ||
1554 (ios->timing == MMC_TIMING_MMC_HS200) ||
1555 (ios->timing == MMC_TIMING_MMC_DDR52) ||
1556 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1557 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1558 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1559 (ios->timing == MMC_TIMING_UHS_SDR25))
1560 ctrl |= SDHCI_CTRL_HISPD;
1562 if (!host->preset_enabled) {
1563 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1565 * We only need to set Driver Strength if the
1566 * preset value enable is not set.
1568 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1569 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1570 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1571 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1572 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1573 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1574 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1575 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1576 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1577 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1579 pr_warn("%s: invalid driver type, default to "
1580 "driver type B\n", mmc_hostname(mmc));
1581 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1584 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1587 * According to SDHC Spec v3.00, if the Preset Value
1588 * Enable in the Host Control 2 register is set, we
1589 * need to reset SD Clock Enable before changing High
1590 * Speed Enable to avoid generating clock glitches.
1593 /* Reset SD Clock Enable */
1594 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1595 clk &= ~SDHCI_CLOCK_CARD_EN;
1596 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1598 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1600 /* Re-enable SD Clock */
1601 host->ops->set_clock(host, host->clock);
1604 /* Reset SD Clock Enable */
1605 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1606 clk &= ~SDHCI_CLOCK_CARD_EN;
1607 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1609 host->ops->set_uhs_signaling(host, ios->timing);
1610 host->timing = ios->timing;
1612 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1613 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1614 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1615 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1616 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1617 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1618 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1621 sdhci_enable_preset_value(host, true);
1622 preset = sdhci_get_preset_value(host);
1623 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1624 >> SDHCI_PRESET_DRV_SHIFT;
1627 /* Re-enable SD Clock */
1628 host->ops->set_clock(host, host->clock);
1630 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1633 * Some (ENE) controllers misbehave badly on some ios operations,
1634 * signalling timeout and CRC errors even on CMD0. Resetting
1635 * it on each ios seems to solve the problem.
1637 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1638 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1641 spin_unlock_irqrestore(&host->lock, flags);
1644 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1646 struct sdhci_host *host = mmc_priv(mmc);
1648 sdhci_runtime_pm_get(host);
1649 sdhci_do_set_ios(host, ios);
1650 sdhci_runtime_pm_put(host);
1653 static int sdhci_do_get_cd(struct sdhci_host *host)
1655 int gpio_cd = mmc_gpio_get_cd(host->mmc);
1657 if (host->flags & SDHCI_DEVICE_DEAD)
1660 /* If nonremovable, assume that the card is always present. */
1661 if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1665 * Try slot gpio detect; if defined it takes precedence
1666 * over the built-in controller functionality.
1668 if (!IS_ERR_VALUE(gpio_cd))
1671 /* If polling, assume that the card is always present. */
1672 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1675 /* Host native card detect */
1676 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1679 static int sdhci_get_cd(struct mmc_host *mmc)
1681 struct sdhci_host *host = mmc_priv(mmc);
1684 sdhci_runtime_pm_get(host);
1685 ret = sdhci_do_get_cd(host);
1686 sdhci_runtime_pm_put(host);
1690 static int sdhci_check_ro(struct sdhci_host *host)
1692 unsigned long flags;
1695 spin_lock_irqsave(&host->lock, flags);
1697 if (host->flags & SDHCI_DEVICE_DEAD)
1699 else if (host->ops->get_ro)
1700 is_readonly = host->ops->get_ro(host);
1702 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1703 & SDHCI_WRITE_PROTECT);
1705 spin_unlock_irqrestore(&host->lock, flags);
1707 /* This quirk needs to be replaced by a callback-function later */
1708 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1709 !is_readonly : is_readonly;
1712 #define SAMPLE_COUNT 5
1714 static int sdhci_do_get_ro(struct sdhci_host *host)
1718 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1719 return sdhci_check_ro(host);
1722 for (i = 0; i < SAMPLE_COUNT; i++) {
1723 if (sdhci_check_ro(host)) {
1724 if (++ro_count > SAMPLE_COUNT / 2)
1732 static void sdhci_hw_reset(struct mmc_host *mmc)
1734 struct sdhci_host *host = mmc_priv(mmc);
1736 if (host->ops && host->ops->hw_reset)
1737 host->ops->hw_reset(host);
1740 static int sdhci_get_ro(struct mmc_host *mmc)
1742 struct sdhci_host *host = mmc_priv(mmc);
1745 sdhci_runtime_pm_get(host);
1746 ret = sdhci_do_get_ro(host);
1747 sdhci_runtime_pm_put(host);
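/*
 * Illustrative sketch (not part of the driver): the unstable-RO quirk handled
 * in sdhci_do_get_ro() above samples the write-protect switch several times
 * and takes a majority vote.  read_ro_once() is a hypothetical stand-in for
 * sdhci_check_ro(); here it just replays a fixed pattern of noisy samples.
 */
#include <stdio.h>

#define EX_SAMPLE_COUNT	5

static int read_ro_once(int i)
{
	static const int noisy[EX_SAMPLE_COUNT] = { 1, 0, 1, 1, 0 };

	return noisy[i];
}

static int get_ro_debounced(void)
{
	int i, ro_count = 0;

	for (i = 0; i < EX_SAMPLE_COUNT; i++) {
		if (read_ro_once(i)) {
			if (++ro_count > EX_SAMPLE_COUNT / 2)
				return 1;	/* majority says read-only */
		}
		/* the driver sleeps briefly between samples */
	}
	return 0;
}

int main(void)
{
	printf("card is %s\n", get_ro_debounced() ? "read-only" : "writable");
	return 0;
}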
1751 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1753 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1755 host->ier |= SDHCI_INT_CARD_INT;
1757 host->ier &= ~SDHCI_INT_CARD_INT;
1759 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1760 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1765 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1767 struct sdhci_host *host = mmc_priv(mmc);
1768 unsigned long flags;
1770 sdhci_runtime_pm_get(host);
1772 spin_lock_irqsave(&host->lock, flags);
1774 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1776 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1778 sdhci_enable_sdio_irq_nolock(host, enable);
1779 spin_unlock_irqrestore(&host->lock, flags);
1781 sdhci_runtime_pm_put(host);
1784 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1785 struct mmc_ios *ios)
1787 struct mmc_host *mmc = host->mmc;
1792 * Signal Voltage Switching is only applicable for Host Controllers v3.00 and above.
1795 if (host->version < SDHCI_SPEC_300)
1798 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1800 switch (ios->signal_voltage) {
1801 case MMC_SIGNAL_VOLTAGE_330:
1802 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1803 ctrl &= ~SDHCI_CTRL_VDD_180;
1804 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1806 if (!IS_ERR(mmc->supply.vqmmc)) {
1807 ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
1810 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1816 usleep_range(5000, 5500);
1818 /* 3.3V regulator output should be stable within 5 ms */
1819 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1820 if (!(ctrl & SDHCI_CTRL_VDD_180))
1823 pr_warn("%s: 3.3V regulator output did not become stable\n",
1827 case MMC_SIGNAL_VOLTAGE_180:
1828 if (!IS_ERR(mmc->supply.vqmmc)) {
1829 ret = regulator_set_voltage(mmc->supply.vqmmc,
1832 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1839 * Enable 1.8V Signal Enable in the Host Control2
1842 ctrl |= SDHCI_CTRL_VDD_180;
1843 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1845 /* Some controllers need to do more when switching */
1846 if (host->ops->voltage_switch)
1847 host->ops->voltage_switch(host);
1849 /* 1.8V regulator output should be stable within 5 ms */
1850 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1851 if (ctrl & SDHCI_CTRL_VDD_180)
1854 pr_warn("%s: 1.8V regulator output did not become stable\n",
1858 case MMC_SIGNAL_VOLTAGE_120:
1859 if (!IS_ERR(mmc->supply.vqmmc)) {
1860 ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
1863 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1870 /* No signal voltage switch required */
1875 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1876 struct mmc_ios *ios)
1878 struct sdhci_host *host = mmc_priv(mmc);
1881 if (host->version < SDHCI_SPEC_300)
1883 sdhci_runtime_pm_get(host);
1884 err = sdhci_do_start_signal_voltage_switch(host, ios);
1885 sdhci_runtime_pm_put(host);
1889 static int sdhci_card_busy(struct mmc_host *mmc)
1891 struct sdhci_host *host = mmc_priv(mmc);
1894 sdhci_runtime_pm_get(host);
1895 /* Check whether DAT[3:0] is 0000 */
1896 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1897 sdhci_runtime_pm_put(host);
1899 return !(present_state & SDHCI_DATA_LVL_MASK);
1902 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1904 struct sdhci_host *host = mmc_priv(mmc);
1905 unsigned long flags;
1907 spin_lock_irqsave(&host->lock, flags);
1908 host->flags |= SDHCI_HS400_TUNING;
1909 spin_unlock_irqrestore(&host->lock, flags);
1914 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1916 struct sdhci_host *host = mmc_priv(mmc);
1918 int tuning_loop_counter = MAX_TUNING_LOOP;
1920 unsigned long flags;
1921 unsigned int tuning_count = 0;
1924 sdhci_runtime_pm_get(host);
1925 spin_lock_irqsave(&host->lock, flags);
1927 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1928 host->flags &= ~SDHCI_HS400_TUNING;
1930 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1931 tuning_count = host->tuning_count;
1934 * The Host Controller needs tuning in case of SDR104 and DDR50
1935 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
1936 * the Capabilities register.
1937 * If the Host Controller supports the HS200 mode then the
1938 * tuning function has to be executed.
1940 switch (host->timing) {
1941 /* HS400 tuning is done in HS200 mode */
1942 case MMC_TIMING_MMC_HS400:
1946 case MMC_TIMING_MMC_HS200:
1948 * Periodic re-tuning for HS400 is not expected to be needed, so disable it here.
1955 case MMC_TIMING_UHS_SDR104:
1956 case MMC_TIMING_UHS_DDR50:
1959 case MMC_TIMING_UHS_SDR50:
1960 if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
1961 host->flags & SDHCI_SDR104_NEEDS_TUNING)
1969 if (host->ops->platform_execute_tuning) {
1970 spin_unlock_irqrestore(&host->lock, flags);
1971 err = host->ops->platform_execute_tuning(host, opcode);
1972 sdhci_runtime_pm_put(host);
1976 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1977 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1978 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1979 ctrl |= SDHCI_CTRL_TUNED_CLK;
1980 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1983 * As per the Host Controller spec v3.00, tuning command
1984 * generates Buffer Read Ready interrupt, so enable that.
1986 * Note: The spec clearly says that when tuning sequence
1987 * is being performed, the controller does not generate
1988 * interrupts other than Buffer Read Ready interrupt. But
1989 * to make sure we don't hit a controller bug, we _only_
1990 * enable Buffer Read Ready interrupt here.
1992 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
1993 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
1996 * Issue CMD19 repeatedly until Execute Tuning is set to 0 or the number
1997 * of loops reaches 40, or a timeout of 150ms occurs.
2000 struct mmc_command cmd = {0};
2001 struct mmc_request mrq = {NULL};
2003 cmd.opcode = opcode;
2005 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2010 if (tuning_loop_counter-- == 0)
2017 * In response to CMD19, the card sends 64 bytes of tuning
2018 * block to the Host Controller. So we set the block size to 64 here.
2021 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
2022 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2023 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
2025 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
2026 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
2029 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
2034 * The tuning block is sent by the card to the host controller.
2035 * So we set the TRNS_READ bit in the Transfer Mode register.
2036 * This also takes care of setting DMA Enable and Multi Block
2037 * Select in the same register to 0.
2039 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2041 sdhci_send_command(host, &cmd);
2046 spin_unlock_irqrestore(&host->lock, flags);
2047 /* Wait for Buffer Read Ready interrupt */
2048 wait_event_interruptible_timeout(host->buf_ready_int,
2049 (host->tuning_done == 1),
2050 msecs_to_jiffies(50));
2051 spin_lock_irqsave(&host->lock, flags);
2053 if (!host->tuning_done) {
2054 pr_debug(DRIVER_NAME ": Timeout waiting for "
2055 "Buffer Read Ready interrupt during tuning "
2056 "procedure, falling back to fixed sampling "
2058 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2059 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2060 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2061 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2063 sdhci_do_reset(host, SDHCI_RESET_CMD);
2064 sdhci_do_reset(host, SDHCI_RESET_DATA);
2068 if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200)
2071 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2072 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2074 spin_unlock_irqrestore(&host->lock, flags);
2076 memset(&cmd, 0, sizeof(cmd));
2077 cmd.opcode = MMC_STOP_TRANSMISSION;
2078 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2079 cmd.busy_timeout = 50;
2080 mmc_wait_for_cmd(mmc, &cmd, 0);
2082 spin_lock_irqsave(&host->lock, flags);
2087 host->tuning_done = 0;
2089 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2091 /* eMMC spec does not require a delay between tuning cycles */
2092 if (opcode == MMC_SEND_TUNING_BLOCK)
2094 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
2097 * The Host Driver has exhausted the maximum number of loops allowed,
2098 * so use fixed sampling frequency.
2100 if (tuning_loop_counter < 0) {
2101 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2102 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2104 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
2105 pr_info(DRIVER_NAME ": Tuning procedure"
2106 " failed, falling back to fixed sampling"
2114 * In case tuning fails, host controllers which support
2115 * re-tuning can try tuning again at a later time, when the
2116 * re-tuning timer expires. So for these controllers, we
2117 * return 0. Since there might be other controllers who do not
2118 * have this capability, we return error for them.
2123 host->mmc->retune_period = err ? 0 : tuning_count;
2125 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2126 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2128 spin_unlock_irqrestore(&host->lock, flags);
2129 sdhci_runtime_pm_put(host);
2134 static int sdhci_select_drive_strength(struct mmc_card *card,
2135 unsigned int max_dtr, int host_drv,
2136 int card_drv, int *drv_type)
2138 struct sdhci_host *host = mmc_priv(card->host);
2140 if (!host->ops->select_drive_strength)
2143 return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
2144 card_drv, drv_type);
2147 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2149 /* Host Controller v3.00 defines preset value registers */
2150 if (host->version < SDHCI_SPEC_300)
2154 * We only enable or disable Preset Value if they are not already
2155 * enabled or disabled respectively. Otherwise, we bail out.
2157 if (host->preset_enabled != enable) {
2158 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2161 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2163 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2165 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2168 host->flags |= SDHCI_PV_ENABLED;
2170 host->flags &= ~SDHCI_PV_ENABLED;
2172 host->preset_enabled = enable;
2176 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2179 struct sdhci_host *host = mmc_priv(mmc);
2180 struct mmc_data *data = mrq->data;
2182 if (host->flags & SDHCI_REQ_USE_DMA) {
2183 if (data->host_cookie == COOKIE_GIVEN ||
2184 data->host_cookie == COOKIE_MAPPED)
2185 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2186 data->flags & MMC_DATA_WRITE ?
2187 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2188 data->host_cookie = COOKIE_UNMAPPED;
2192 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
2193 struct mmc_data *data)
2197 if (data->host_cookie == COOKIE_MAPPED) {
2198 data->host_cookie = COOKIE_GIVEN;
2199 return data->sg_count;
2202 WARN_ON(data->host_cookie == COOKIE_GIVEN);
2204 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2205 data->flags & MMC_DATA_WRITE ?
2206 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2211 data->sg_count = sg_count;
2212 data->host_cookie = COOKIE_MAPPED;
2217 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
2220 struct sdhci_host *host = mmc_priv(mmc);
2222 mrq->data->host_cookie = COOKIE_UNMAPPED;
2224 if (host->flags & SDHCI_REQ_USE_DMA)
2225 sdhci_pre_dma_transfer(host, mrq->data);
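/*
 * Illustrative sketch (not part of the driver): the pre_req/post_req hooks
 * above track DMA mapping state in data->host_cookie so a scatterlist that
 * was mapped ahead of time is not mapped twice and is unmapped exactly once.
 * Stand-alone model of the cookie transitions; the mapping calls themselves
 * are mocked out and the names below are the example's own.
 */
#include <stdio.h>

enum ex_cookie { EX_UNMAPPED, EX_MAPPED, EX_GIVEN };

struct ex_data { enum ex_cookie cookie; };

static void ex_pre_req(struct ex_data *d)
{
	d->cookie = EX_UNMAPPED;
	/* dma_map_sg(...) would happen here */
	d->cookie = EX_MAPPED;		/* mapped ahead of the actual request */
}

static void ex_start_transfer(struct ex_data *d)
{
	if (d->cookie == EX_MAPPED) {
		d->cookie = EX_GIVEN;	/* reuse the pre-mapped scatterlist */
		return;
	}
	/* not pre-mapped: map it now */
	d->cookie = EX_MAPPED;
}

static void ex_post_req(struct ex_data *d)
{
	if (d->cookie == EX_GIVEN || d->cookie == EX_MAPPED)
		d->cookie = EX_UNMAPPED;	/* dma_unmap_sg(...) */
}

int main(void)
{
	struct ex_data d = { EX_UNMAPPED };

	ex_pre_req(&d);
	ex_start_transfer(&d);
	ex_post_req(&d);
	printf("final cookie state: %d (0 = unmapped)\n", d.cookie);
	return 0;
}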
2228 static void sdhci_card_event(struct mmc_host *mmc)
2230 struct sdhci_host *host = mmc_priv(mmc);
2231 unsigned long flags;
2234 /* First check if client has provided their own card event */
2235 if (host->ops->card_event)
2236 host->ops->card_event(host);
2238 present = sdhci_do_get_cd(host);
2240 spin_lock_irqsave(&host->lock, flags);
2242 /* Check host->mrq first in case we are runtime suspended */
2243 if (host->mrq && !present) {
2244 pr_err("%s: Card removed during transfer!\n",
2245 mmc_hostname(host->mmc));
2246 pr_err("%s: Resetting controller.\n",
2247 mmc_hostname(host->mmc));
2249 sdhci_do_reset(host, SDHCI_RESET_CMD);
2250 sdhci_do_reset(host, SDHCI_RESET_DATA);
2252 host->mrq->cmd->error = -ENOMEDIUM;
2253 tasklet_schedule(&host->finish_tasklet);
2256 spin_unlock_irqrestore(&host->lock, flags);
2259 static const struct mmc_host_ops sdhci_ops = {
2260 .request = sdhci_request,
2261 .post_req = sdhci_post_req,
2262 .pre_req = sdhci_pre_req,
2263 .set_ios = sdhci_set_ios,
2264 .get_cd = sdhci_get_cd,
2265 .get_ro = sdhci_get_ro,
2266 .hw_reset = sdhci_hw_reset,
2267 .enable_sdio_irq = sdhci_enable_sdio_irq,
2268 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2269 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2270 .execute_tuning = sdhci_execute_tuning,
2271 .select_drive_strength = sdhci_select_drive_strength,
2272 .card_event = sdhci_card_event,
2273 .card_busy = sdhci_card_busy,
2276 /*****************************************************************************\
2280 \*****************************************************************************/
2282 static void sdhci_tasklet_finish(unsigned long param)
2284 struct sdhci_host *host;
2285 unsigned long flags;
2286 struct mmc_request *mrq;
2288 host = (struct sdhci_host*)param;
2290 spin_lock_irqsave(&host->lock, flags);
2293 * If this tasklet gets rescheduled while running, it will
2294 * be run again afterwards but without any active request.
2297 spin_unlock_irqrestore(&host->lock, flags);
2301 del_timer(&host->timer);
2306 * The controller needs a reset of internal state machines
2307 * upon error conditions.
2309 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2310 ((mrq->cmd && mrq->cmd->error) ||
2311 (mrq->sbc && mrq->sbc->error) ||
2312 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
2313 (mrq->data->stop && mrq->data->stop->error))) ||
2314 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2316 /* Some controllers need this kick or reset won't work here */
2317 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2318 /* This is to force an update */
2319 host->ops->set_clock(host, host->clock);
2321 /* Spec says we should do both at the same time, but Ricoh
2322 controllers do not like that. */
2323 sdhci_do_reset(host, SDHCI_RESET_CMD);
2324 sdhci_do_reset(host, SDHCI_RESET_DATA);
2331 #ifndef SDHCI_USE_LEDS_CLASS
2332 sdhci_deactivate_led(host);
2336 spin_unlock_irqrestore(&host->lock, flags);
2338 mmc_request_done(host->mmc, mrq);
2339 sdhci_runtime_pm_put(host);
2342 static void sdhci_timeout_timer(unsigned long data)
2344 struct sdhci_host *host;
2345 unsigned long flags;
2347 host = (struct sdhci_host*)data;
2349 spin_lock_irqsave(&host->lock, flags);
2352 pr_err("%s: Timeout waiting for hardware "
2353 "interrupt.\n", mmc_hostname(host->mmc));
2354 sdhci_dumpregs(host);
2357 host->data->error = -ETIMEDOUT;
2358 sdhci_finish_data(host);
2361 host->cmd->error = -ETIMEDOUT;
2363 host->mrq->cmd->error = -ETIMEDOUT;
2365 tasklet_schedule(&host->finish_tasklet);
2370 spin_unlock_irqrestore(&host->lock, flags);
2373 /*****************************************************************************\
2375 * Interrupt handling *
2377 \*****************************************************************************/
2379 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
2381 BUG_ON(intmask == 0);
2384 pr_err("%s: Got command interrupt 0x%08x even "
2385 "though no command operation was in progress.\n",
2386 mmc_hostname(host->mmc), (unsigned)intmask);
2387 sdhci_dumpregs(host);
2391 if (intmask & SDHCI_INT_TIMEOUT)
2392 host->cmd->error = -ETIMEDOUT;
2393 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
2395 host->cmd->error = -EILSEQ;
2397 if (host->cmd->error) {
2398 tasklet_schedule(&host->finish_tasklet);
2403 * The host can send an interrupt when the busy state has
2404 * ended, allowing us to wait without wasting CPU cycles.
2405 * Unfortunately this is overloaded on the "data complete"
2406 * interrupt, so we need to take some care when handling it.
2409 * Note: The 1.0 specification is a bit ambiguous about this
2410 * feature so there might be some problems with older controllers.
2413 if (host->cmd->flags & MMC_RSP_BUSY) {
2414 if (host->cmd->data)
2415 DBG("Cannot wait for busy signal when also "
2416 "doing a data transfer");
2417 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
2418 && !host->busy_handle) {
2419 /* Mark that command complete before busy is ended */
2420 host->busy_handle = 1;
2424 /* The controller does not support the end-of-busy IRQ,
2425 * fall through and take the SDHCI_INT_RESPONSE */
2426 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
2427 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
2428 *mask &= ~SDHCI_INT_DATA_END;
2431 if (intmask & SDHCI_INT_RESPONSE)
2432 sdhci_finish_command(host);
2435 #ifdef CONFIG_MMC_DEBUG
2436 static void sdhci_adma_show_error(struct sdhci_host *host)
2438 const char *name = mmc_hostname(host->mmc);
2439 void *desc = host->adma_table;
2441 sdhci_dumpregs(host);
2444 struct sdhci_adma2_64_desc *dma_desc = desc;
2446 if (host->flags & SDHCI_USE_64_BIT_DMA)
2447 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2448 name, desc, le32_to_cpu(dma_desc->addr_hi),
2449 le32_to_cpu(dma_desc->addr_lo),
2450 le16_to_cpu(dma_desc->len),
2451 le16_to_cpu(dma_desc->cmd));
2453 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2454 name, desc, le32_to_cpu(dma_desc->addr_lo),
2455 le16_to_cpu(dma_desc->len),
2456 le16_to_cpu(dma_desc->cmd));
2458 desc += host->desc_sz;
2460 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2461 break;
2465 static void sdhci_adma_show_error(struct sdhci_host *host) { }
2468 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2471 BUG_ON(intmask == 0);
2473 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2474 if (intmask & SDHCI_INT_DATA_AVAIL) {
2475 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2476 if (command == MMC_SEND_TUNING_BLOCK ||
2477 command == MMC_SEND_TUNING_BLOCK_HS200) {
2478 host->tuning_done = 1;
2479 wake_up(&host->buf_ready_int);
2486 * The "data complete" interrupt is also used to
2487 * indicate that a busy state has ended. See comment
2488 * above in sdhci_cmd_irq().
2490 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
2491 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2492 host->cmd->error = -ETIMEDOUT;
2493 tasklet_schedule(&host->finish_tasklet);
2496 if (intmask & SDHCI_INT_DATA_END) {
2498 * Some cards handle the busy-end interrupt
2499 * before the command has completed, so make
2500 * sure we do things in the proper order.
2502 if (host->busy_handle)
2503 sdhci_finish_command(host);
2504 else
2505 host->busy_handle = 1;
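/*
 * Editorial sketch (standalone illustration, not driver code): the
 * busy_handle handshake used here and in sdhci_cmd_irq(). Command-complete
 * and busy-end can arrive in either order; the first handler to run only
 * records its event, and whichever handler finds the flag already set is
 * the one that finishes the command. The helper name is illustrative only.
 */
static int sdhci_busy_handshake(int *other_half_seen)
{
	if (*other_half_seen)
		return 1;		/* both events seen: finish the command */
	*other_half_seen = 1;		/* first event: just record it */
	return 0;
}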
2510 pr_err("%s: Got data interrupt 0x%08x even "
2511 "though no data operation was in progress.\n",
2512 mmc_hostname(host->mmc), (unsigned)intmask);
2513 sdhci_dumpregs(host);
2518 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2519 host->data->error = -ETIMEDOUT;
2520 else if (intmask & SDHCI_INT_DATA_END_BIT)
2521 host->data->error = -EILSEQ;
2522 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2523 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2524 != MMC_BUS_TEST_R)
2525 host->data->error = -EILSEQ;
2526 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2527 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2528 sdhci_adma_show_error(host);
2529 host->data->error = -EIO;
2530 if (host->ops->adma_workaround)
2531 host->ops->adma_workaround(host, intmask);
2534 if (host->data->error)
2535 sdhci_finish_data(host);
2537 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2538 sdhci_transfer_pio(host);
2541 * We currently don't do anything fancy with DMA
2542 * boundaries, but as we can't disable the feature
2543 * we need to at least restart the transfer.
2545 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2546 * should return a valid address to continue from, but as
2547 * some controllers are faulty, don't trust them.
2549 if (intmask & SDHCI_INT_DMA_END) {
2550 u32 dmastart, dmanow;
2551 dmastart = sg_dma_address(host->data->sg);
2552 dmanow = dmastart + host->data->bytes_xfered;
2554 * Force update to the next DMA block boundary.
2556 dmanow = (dmanow &
2557 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2558 SDHCI_DEFAULT_BOUNDARY_SIZE;
2559 host->data->bytes_xfered = dmanow - dmastart;
2560 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2562 mmc_hostname(host->mmc), dmastart,
2563 host->data->bytes_xfered, dmanow);
2564 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
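/*
 * Editorial sketch (standalone illustration, not driver code): how the
 * restart address above is derived, assuming the default 512 KiB SDMA
 * boundary (SDHCI_DEFAULT_BOUNDARY_SIZE). The helper name is illustrative.
 */
static unsigned int sdhci_next_sdma_boundary(unsigned int dmastart,
					     unsigned int bytes_xfered)
{
	const unsigned int boundary = 512 * 1024;	/* 512 KiB */
	unsigned int dmanow = dmastart + bytes_xfered;

	/* Round down to the boundary just crossed, then step past it. */
	return (dmanow & ~(boundary - 1)) + boundary;
}
/* e.g. dmastart = 0x12345000 and 0x10000 bytes done -> restart at 0x12380000 */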
2567 if (intmask & SDHCI_INT_DATA_END) {
2570 * Data managed to finish before the
2571 * command completed. Make sure we do
2572 * things in the proper order.
2574 host->data_early = 1;
2576 sdhci_finish_data(host);
2582 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2584 irqreturn_t result = IRQ_NONE;
2585 struct sdhci_host *host = dev_id;
2586 u32 intmask, mask, unexpected = 0;
2589 spin_lock(&host->lock);
2591 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2592 spin_unlock(&host->lock);
2596 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2597 if (!intmask || intmask == 0xffffffff) {
2603 /* Clear selected interrupts. */
2604 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2605 SDHCI_INT_BUS_POWER);
2606 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2608 DBG("*** %s got interrupt: 0x%08x\n",
2609 mmc_hostname(host->mmc), intmask);
2611 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2612 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2613 SDHCI_CARD_PRESENT;
2616 * There is an observation on i.MX eSDHC: the INSERT
2617 * bit will be immediately set again when it gets
2618 * cleared, if a card is inserted. We have to mask
2619 * the irq to prevent an interrupt storm which will
2620 * freeze the system. And the REMOVE gets the
2621 * same situation.
2623 * More testing is needed here to ensure it works
2624 * for other platforms though.
2626 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2627 SDHCI_INT_CARD_REMOVE);
2628 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2629 SDHCI_INT_CARD_INSERT;
2630 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2631 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2633 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2634 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2636 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2637 SDHCI_INT_CARD_REMOVE);
2638 result = IRQ_WAKE_THREAD;
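/*
 * Editorial sketch (standalone illustration, not driver code): the card
 * detect handling above arms only the interrupt for the event opposite to
 * the current state, which is what breaks the interrupt storm described in
 * the comment. Uses the SDHCI_* constants from sdhci.h; the helper name is
 * illustrative only.
 */
static u32 sdhci_cd_irq_to_watch(u32 present_state)
{
	return (present_state & SDHCI_CARD_PRESENT) ?
	       SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
}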
2641 if (intmask & SDHCI_INT_CMD_MASK)
2642 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
2643 &mask);
2645 if (intmask & SDHCI_INT_DATA_MASK)
2646 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2648 if (intmask & SDHCI_INT_BUS_POWER)
2649 pr_err("%s: Card is consuming too much power!\n",
2650 mmc_hostname(host->mmc));
2652 if ((intmask & SDHCI_INT_CARD_INT) &&
2653 (host->ier & SDHCI_INT_CARD_INT)) {
2654 sdhci_enable_sdio_irq_nolock(host, false);
2655 host->thread_isr |= SDHCI_INT_CARD_INT;
2656 result = IRQ_WAKE_THREAD;
2659 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2660 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2661 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2662 SDHCI_INT_CARD_INT);
2665 unexpected |= intmask;
2666 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2669 if (result == IRQ_NONE)
2670 result = IRQ_HANDLED;
2672 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2673 } while (intmask && --max_loops);
2675 spin_unlock(&host->lock);
2678 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2679 mmc_hostname(host->mmc), unexpected);
2680 sdhci_dumpregs(host);
2686 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2688 struct sdhci_host *host = dev_id;
2689 unsigned long flags;
2692 spin_lock_irqsave(&host->lock, flags);
2693 isr = host->thread_isr;
2694 host->thread_isr = 0;
2695 spin_unlock_irqrestore(&host->lock, flags);
2697 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2698 sdhci_card_event(host->mmc);
2699 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2702 if (isr & SDHCI_INT_CARD_INT) {
2703 sdio_run_irqs(host->mmc);
2705 spin_lock_irqsave(&host->lock, flags);
2706 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2707 sdhci_enable_sdio_irq_nolock(host, true);
2708 spin_unlock_irqrestore(&host->lock, flags);
2711 return isr ? IRQ_HANDLED : IRQ_NONE;
2714 /*****************************************************************************\
2718 \*****************************************************************************/
2721 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2724 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2725 | SDHCI_WAKE_ON_INT;
2727 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2728 val |= mask;
2729 /* Avoid spurious wake-ups */
2730 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2731 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2732 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2734 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2736 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2739 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2740 | SDHCI_WAKE_ON_INT;
2742 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2743 val &= ~mask;
2744 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2747 int sdhci_suspend_host(struct sdhci_host *host)
2749 sdhci_disable_card_detection(host);
2751 mmc_retune_timer_stop(host->mmc);
2752 mmc_retune_needed(host->mmc);
2754 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2756 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2757 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2758 free_irq(host->irq, host);
2760 sdhci_enable_irq_wakeups(host);
2761 enable_irq_wake(host->irq);
2766 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2768 int sdhci_resume_host(struct sdhci_host *host)
2772 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2773 if (host->ops->enable_dma)
2774 host->ops->enable_dma(host);
2777 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2778 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2779 /* Card keeps power but host controller does not */
2780 sdhci_init(host, 0);
2783 sdhci_do_set_ios(host, &host->mmc->ios);
2785 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2789 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2790 ret = request_threaded_irq(host->irq, sdhci_irq,
2791 sdhci_thread_irq, IRQF_SHARED,
2792 mmc_hostname(host->mmc), host);
2796 sdhci_disable_irq_wakeups(host);
2797 disable_irq_wake(host->irq);
2800 sdhci_enable_card_detection(host);
2805 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2807 static int sdhci_runtime_pm_get(struct sdhci_host *host)
2809 return pm_runtime_get_sync(host->mmc->parent);
2812 static int sdhci_runtime_pm_put(struct sdhci_host *host)
2814 pm_runtime_mark_last_busy(host->mmc->parent);
2815 return pm_runtime_put_autosuspend(host->mmc->parent);
2818 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2822 host->bus_on = true;
2823 pm_runtime_get_noresume(host->mmc->parent);
2826 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
2830 host->bus_on = false;
2831 pm_runtime_put_noidle(host->mmc->parent);
2834 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2836 unsigned long flags;
2838 mmc_retune_timer_stop(host->mmc);
2839 mmc_retune_needed(host->mmc);
2841 spin_lock_irqsave(&host->lock, flags);
2842 host->ier &= SDHCI_INT_CARD_INT;
2843 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2844 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2845 spin_unlock_irqrestore(&host->lock, flags);
2847 synchronize_hardirq(host->irq);
2849 spin_lock_irqsave(&host->lock, flags);
2850 host->runtime_suspended = true;
2851 spin_unlock_irqrestore(&host->lock, flags);
2855 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2857 int sdhci_runtime_resume_host(struct sdhci_host *host)
2859 unsigned long flags;
2860 int host_flags = host->flags;
2862 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2863 if (host->ops->enable_dma)
2864 host->ops->enable_dma(host);
2867 sdhci_init(host, 0);
2869 /* Force clock and power re-programming */
2872 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2873 sdhci_do_set_ios(host, &host->mmc->ios);
2875 if ((host_flags & SDHCI_PV_ENABLED) &&
2876 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2877 spin_lock_irqsave(&host->lock, flags);
2878 sdhci_enable_preset_value(host, true);
2879 spin_unlock_irqrestore(&host->lock, flags);
2882 spin_lock_irqsave(&host->lock, flags);
2884 host->runtime_suspended = false;
2886 /* Enable SDIO IRQ */
2887 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2888 sdhci_enable_sdio_irq_nolock(host, true);
2890 /* Enable Card Detection */
2891 sdhci_enable_card_detection(host);
2893 spin_unlock_irqrestore(&host->lock, flags);
2897 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2899 #endif /* CONFIG_PM */
2901 /*****************************************************************************\
2903 * Device allocation/registration *
2905 \*****************************************************************************/
2907 struct sdhci_host *sdhci_alloc_host(struct device *dev,
2908 size_t priv_size)
2910 struct mmc_host *mmc;
2911 struct sdhci_host *host;
2913 WARN_ON(dev == NULL);
2915 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2916 if (!mmc)
2917 return ERR_PTR(-ENOMEM);
2919 host = mmc_priv(mmc);
2921 host->mmc_host_ops = sdhci_ops;
2922 mmc->ops = &host->mmc_host_ops;
2927 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
2929 int sdhci_add_host(struct sdhci_host *host)
2931 struct mmc_host *mmc;
2932 u32 caps[2] = {0, 0};
2933 u32 max_current_caps;
2934 unsigned int ocr_avail;
2935 unsigned int override_timeout_clk;
2939 WARN_ON(host == NULL);
2945 if (debug_quirks)
2946 host->quirks = debug_quirks;
2947 if (debug_quirks2)
2948 host->quirks2 = debug_quirks2;
2950 override_timeout_clk = host->timeout_clk;
2952 sdhci_do_reset(host, SDHCI_RESET_ALL);
2954 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2955 host->version = (host->version & SDHCI_SPEC_VER_MASK)
2956 >> SDHCI_SPEC_VER_SHIFT;
2957 if (host->version > SDHCI_SPEC_300) {
2958 pr_err("%s: Unknown controller version (%d). "
2959 "You may experience problems.\n", mmc_hostname(mmc),
2963 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2964 sdhci_readl(host, SDHCI_CAPABILITIES);
2966 if (host->version >= SDHCI_SPEC_300)
2967 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
2968 host->caps1 :
2969 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2971 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2972 host->flags |= SDHCI_USE_SDMA;
2973 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2974 DBG("Controller doesn't have SDMA capability\n");
2976 host->flags |= SDHCI_USE_SDMA;
2978 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2979 (host->flags & SDHCI_USE_SDMA)) {
2980 DBG("Disabling DMA as it is marked broken\n");
2981 host->flags &= ~SDHCI_USE_SDMA;
2984 if ((host->version >= SDHCI_SPEC_200) &&
2985 (caps[0] & SDHCI_CAN_DO_ADMA2))
2986 host->flags |= SDHCI_USE_ADMA;
2988 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
2989 (host->flags & SDHCI_USE_ADMA)) {
2990 DBG("Disabling ADMA as it is marked broken\n");
2991 host->flags &= ~SDHCI_USE_ADMA;
2995 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2996 * and *must* do 64-bit DMA. A driver has the opportunity to change
2997 * that during the first call to ->enable_dma(). Similarly
2998 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
2999 * implement.
3001 if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
3002 host->flags |= SDHCI_USE_64_BIT_DMA;
3004 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3005 if (host->ops->enable_dma) {
3006 if (host->ops->enable_dma(host)) {
3007 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3010 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3015 /* SDMA does not support 64-bit DMA */
3016 if (host->flags & SDHCI_USE_64_BIT_DMA)
3017 host->flags &= ~SDHCI_USE_SDMA;
3019 if (host->flags & SDHCI_USE_ADMA) {
3021 * The DMA descriptor table size is calculated as the maximum
3022 * number of segments times 2, to allow for an alignment
3023 * descriptor for each segment, plus 1 for a nop end descriptor,
3024 * all multiplied by the descriptor size.
3026 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3027 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3028 SDHCI_ADMA2_64_DESC_SZ;
3029 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3031 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3032 SDHCI_ADMA2_32_DESC_SZ;
3033 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
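/*
 * Editorial sketch (standalone illustration, not driver code): the
 * descriptor-table sizing above, worked through with the values sdhci.h
 * normally defines (SDHCI_MAX_SEGS = 128, 8-byte 32-bit ADMA2 descriptors,
 * 12-byte 64-bit descriptors). Those constants are assumptions here, not
 * taken from this listing.
 */
static size_t sdhci_adma_table_bytes(size_t max_segs, size_t desc_sz)
{
	/* one data + one alignment descriptor per segment, plus a nop end */
	return (max_segs * 2 + 1) * desc_sz;
}
/* sdhci_adma_table_bytes(128, 8)  == 2056 bytes (32-bit ADMA2) */
/* sdhci_adma_table_bytes(128, 12) == 3084 bytes (64-bit ADMA2) */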
3035 host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
3036 host->adma_table_sz,
3037 &host->adma_addr,
3038 GFP_KERNEL);
3039 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3040 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
3041 if (!host->adma_table || !host->align_buffer) {
3042 if (host->adma_table)
3043 dma_free_coherent(mmc_dev(mmc),
3044 host->adma_table_sz,
3045 host->adma_table,
3046 host->adma_addr);
3047 kfree(host->align_buffer);
3048 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3050 host->flags &= ~SDHCI_USE_ADMA;
3051 host->adma_table = NULL;
3052 host->align_buffer = NULL;
3053 } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3054 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3056 host->flags &= ~SDHCI_USE_ADMA;
3057 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3058 host->adma_table, host->adma_addr);
3059 kfree(host->align_buffer);
3060 host->adma_table = NULL;
3061 host->align_buffer = NULL;
3066 * If we use DMA, then it's up to the caller to set the DMA
3067 * mask, but PIO does not need the hw shim so we set a new
3068 * mask here in that case.
3070 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3071 host->dma_mask = DMA_BIT_MASK(64);
3072 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3075 if (host->version >= SDHCI_SPEC_300)
3076 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
3077 >> SDHCI_CLOCK_BASE_SHIFT;
3079 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
3080 >> SDHCI_CLOCK_BASE_SHIFT;
3082 host->max_clk *= 1000000;
3083 if (host->max_clk == 0 || host->quirks &
3084 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3085 if (!host->ops->get_max_clock) {
3086 pr_err("%s: Hardware doesn't specify base clock "
3087 "frequency.\n", mmc_hostname(mmc));
3090 host->max_clk = host->ops->get_max_clock(host);
3094 * In case of Host Controller v3.00, find out whether clock
3095 * multiplier is supported.
3097 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
3098 SDHCI_CLOCK_MUL_SHIFT;
3101 * In case the value in Clock Multiplier is 0, then programmable
3102 * clock mode is not supported, otherwise the actual clock
3103 * multiplier is one more than the value of Clock Multiplier
3104 * in the Capabilities Register.
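/*
 * Editorial sketch (standalone illustration, not driver code): the rule the
 * comment above describes. A Clock Multiplier field of 0 means programmable
 * clock mode is unsupported; otherwise the effective multiplier is the field
 * value plus one.
 */
static unsigned int sdhci_effective_clk_mul(unsigned int clk_mul_field)
{
	return clk_mul_field ? clk_mul_field + 1 : 0;	/* 0 = unsupported */
}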
3110 * Set host parameters.
3112 max_clk = host->max_clk;
3114 if (host->ops->get_min_clock)
3115 mmc->f_min = host->ops->get_min_clock(host);
3116 else if (host->version >= SDHCI_SPEC_300) {
3118 max_clk = host->max_clk * host->clk_mul;
3120 * Divided Clock Mode minimum clock rate is always less than
3121 * Programmable Clock Mode minimum clock rate.
3123 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3125 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
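/*
 * Editorial sketch (standalone illustration, not driver code): a worked
 * example of the minimum clock calculation above, assuming the usual sdhci.h
 * divisor limits (SDHCI_MAX_DIV_SPEC_300 = 2046, SDHCI_MAX_DIV_SPEC_200 =
 * 256). With a 200 MHz base clock this gives roughly 97.8 kHz on a v3.00
 * host and about 781 kHz on an older one.
 */
static unsigned int sdhci_min_clock_hz(unsigned int base_hz, int spec_v300)
{
	return base_hz / (spec_v300 ? 2046 : 256);
}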
3127 if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
3128 mmc->f_max = max_clk;
3130 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3131 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3132 SDHCI_TIMEOUT_CLK_SHIFT;
3133 if (host->timeout_clk == 0) {
3134 if (host->ops->get_timeout_clock) {
3135 host->timeout_clk =
3136 host->ops->get_timeout_clock(host);
3138 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3144 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
3145 host->timeout_clk *= 1000;
3147 if (override_timeout_clk)
3148 host->timeout_clk = override_timeout_clk;
3150 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3151 host->ops->get_max_timeout_count(host) : 1 << 27;
3152 mmc->max_busy_timeout /= host->timeout_clk;
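/*
 * Editorial sketch (standalone illustration, not driver code): at this point
 * host->timeout_clk is in kHz, so dividing the maximum timeout-counter value
 * (1 << 27 cycles by default) by it yields a busy timeout in milliseconds.
 * With a 48 MHz timeout clock, for example: (1 << 27) / 48000 ~= 2796 ms.
 * The 48 MHz figure is illustrative only.
 */
static unsigned int sdhci_max_busy_timeout_ms(unsigned int max_count,
					      unsigned int timeout_clk_khz)
{
	return max_count / timeout_clk_khz;
}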
3155 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3156 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3158 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3159 host->flags |= SDHCI_AUTO_CMD12;
3161 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3162 if ((host->version >= SDHCI_SPEC_300) &&
3163 ((host->flags & SDHCI_USE_ADMA) ||
3164 !(host->flags & SDHCI_USE_SDMA)) &&
3165 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3166 host->flags |= SDHCI_AUTO_CMD23;
3167 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3169 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3173 * A controller may support 8-bit width, but the board itself
3174 * might not have the pins brought out. Boards that support
3175 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3176 * their platform code before calling sdhci_add_host(), and we
3177 * won't assume 8-bit width for hosts without that CAP.
3179 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3180 mmc->caps |= MMC_CAP_4_BIT_DATA;
3182 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3183 mmc->caps &= ~MMC_CAP_CMD23;
3185 if (caps[0] & SDHCI_CAN_DO_HISPD)
3186 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3188 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3189 !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
3190 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
3191 mmc->caps |= MMC_CAP_NEEDS_POLL;
3193 /* If there are external regulators, get them */
3194 if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
3195 return -EPROBE_DEFER;
3197 /* If the vqmmc regulator cannot supply 1.8 V signalling, there is no UHS */
3198 if (!IS_ERR(mmc->supply.vqmmc)) {
3199 ret = regulator_enable(mmc->supply.vqmmc);
3200 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3201 1950000))
3202 caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
3203 SDHCI_SUPPORT_SDR50 |
3204 SDHCI_SUPPORT_DDR50);
3206 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3207 mmc_hostname(mmc), ret);
3208 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3212 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
3213 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3214 SDHCI_SUPPORT_DDR50);
3216 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3217 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3218 SDHCI_SUPPORT_DDR50))
3219 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3221 /* SDR104 support also implies SDR50 support */
3222 if (caps[1] & SDHCI_SUPPORT_SDR104) {
3223 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3224 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3225 * field can be promoted to support HS200.
3227 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3228 mmc->caps2 |= MMC_CAP2_HS200;
3229 } else if (caps[1] & SDHCI_SUPPORT_SDR50)
3230 mmc->caps |= MMC_CAP_UHS_SDR50;
3232 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3233 (caps[1] & SDHCI_SUPPORT_HS400))
3234 mmc->caps2 |= MMC_CAP2_HS400;
3236 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3237 (IS_ERR(mmc->supply.vqmmc) ||
3238 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3239 1300000)))
3240 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3242 if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3243 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3244 mmc->caps |= MMC_CAP_UHS_DDR50;
3246 /* Does the host need tuning for SDR50? */
3247 if (caps[1] & SDHCI_USE_SDR50_TUNING)
3248 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3250 /* Does the host need tuning for SDR104 / HS200? */
3251 if (mmc->caps2 & MMC_CAP2_HS200)
3252 host->flags |= SDHCI_SDR104_NEEDS_TUNING;
3254 /* Driver Type(s) (A, C, D) supported by the host */
3255 if (caps[1] & SDHCI_DRIVER_TYPE_A)
3256 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3257 if (caps[1] & SDHCI_DRIVER_TYPE_C)
3258 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3259 if (caps[1] & SDHCI_DRIVER_TYPE_D)
3260 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3262 /* Initial value for re-tuning timer count */
3263 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3264 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3267 * In case Re-tuning Timer is not disabled, the actual value of
3268 * re-tuning timer will be 2 ^ (n - 1).
3270 if (host->tuning_count)
3271 host->tuning_count = 1 << (host->tuning_count - 1);
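/*
 * Editorial sketch (standalone illustration, not driver code): the
 * re-tuning timer decoding above. Per the SDHCI v3.00 spec, a field value n
 * in 1..11 means a re-tuning period of 2^(n-1) seconds, and 0 means the
 * timer is disabled.
 */
static unsigned int sdhci_retune_period_secs(unsigned int field)
{
	return field ? 1u << (field - 1) : 0;	/* e.g. field 4 -> 8 seconds */
}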
3273 /* Re-tuning mode supported by the Host Controller */
3274 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
3275 SDHCI_RETUNING_MODE_SHIFT;
3280 * According to SD Host Controller spec v3.00, if the Host System
3281 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3282 * the value is meaningful only if Voltage Support in the Capabilities
3283 * register is set. The actual current value is 4 times the register
3284 * value.
3286 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3287 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3288 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3291 /* convert to SDHCI_MAX_CURRENT format */
3292 curr = curr/1000; /* convert to mA */
3293 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3295 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3296 max_current_caps =
3297 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3298 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3299 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3303 if (caps[0] & SDHCI_CAN_VDD_330) {
3304 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3306 mmc->max_current_330 = ((max_current_caps &
3307 SDHCI_MAX_CURRENT_330_MASK) >>
3308 SDHCI_MAX_CURRENT_330_SHIFT) *
3309 SDHCI_MAX_CURRENT_MULTIPLIER;
3311 if (caps[0] & SDHCI_CAN_VDD_300) {
3312 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3314 mmc->max_current_300 = ((max_current_caps &
3315 SDHCI_MAX_CURRENT_300_MASK) >>
3316 SDHCI_MAX_CURRENT_300_SHIFT) *
3317 SDHCI_MAX_CURRENT_MULTIPLIER;
3319 if (caps[0] & SDHCI_CAN_VDD_180) {
3320 ocr_avail |= MMC_VDD_165_195;
3322 mmc->max_current_180 = ((max_current_caps &
3323 SDHCI_MAX_CURRENT_180_MASK) >>
3324 SDHCI_MAX_CURRENT_180_SHIFT) *
3325 SDHCI_MAX_CURRENT_MULTIPLIER;
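/*
 * Editorial sketch (standalone illustration, not driver code): the current
 * conversions above, assuming SDHCI_MAX_CURRENT_MULTIPLIER is 4, i.e. the
 * Maximum Current register encodes current in 4 mA units. Helper names are
 * illustrative only.
 */
static unsigned int sdhci_max_current_field_to_ma(unsigned int field)
{
	return field * 4;			/* e.g. 0x32 (50) -> 200 mA */
}

static unsigned int sdhci_regulator_ua_to_field(unsigned int microamps)
{
	return (microamps / 1000) / 4;		/* uA -> mA -> 4 mA units */
}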
3328 /* If OCR set by host, use it instead. */
3329 if (host->ocr_mask)
3330 ocr_avail = host->ocr_mask;
3332 /* If OCR set by external regulators, give it highest priority. */
3333 if (mmc->ocr_avail)
3334 ocr_avail = mmc->ocr_avail;
3336 mmc->ocr_avail = ocr_avail;
3337 mmc->ocr_avail_sdio = ocr_avail;
3338 if (host->ocr_avail_sdio)
3339 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3340 mmc->ocr_avail_sd = ocr_avail;
3341 if (host->ocr_avail_sd)
3342 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3343 else /* normal SD controllers don't support 1.8V */
3344 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3345 mmc->ocr_avail_mmc = ocr_avail;
3346 if (host->ocr_avail_mmc)
3347 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3349 if (mmc->ocr_avail == 0) {
3350 pr_err("%s: Hardware doesn't report any "
3351 "support voltages.\n", mmc_hostname(mmc));
3355 spin_lock_init(&host->lock);
3358 * Maximum number of segments. Depends on whether the hardware
3359 * can do scatter/gather or not.
3361 if (host->flags & SDHCI_USE_ADMA)
3362 mmc->max_segs = SDHCI_MAX_SEGS;
3363 else if (host->flags & SDHCI_USE_SDMA)
3364 mmc->max_segs = 1;
3365 else /* PIO */
3366 mmc->max_segs = SDHCI_MAX_SEGS;
3369 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3370 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3373 mmc->max_req_size = 524288;
3376 * Maximum segment size. Could be one segment with the maximum number
3377 * of bytes. When doing hardware scatter/gather, each entry cannot
3378 * be larger than 64 KiB though.
3380 if (host->flags & SDHCI_USE_ADMA) {
3381 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3382 mmc->max_seg_size = 65535;
3384 mmc->max_seg_size = 65536;
3386 mmc->max_seg_size = mmc->max_req_size;
3390 * Maximum block size. This varies from controller to controller and
3391 * is specified in the capabilities register.
3393 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3394 mmc->max_blk_size = 2;
3396 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3397 SDHCI_MAX_BLOCK_SHIFT;
3398 if (mmc->max_blk_size >= 3) {
3399 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3401 mmc->max_blk_size = 0;
3405 mmc->max_blk_size = 512 << mmc->max_blk_size;
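/*
 * Editorial sketch (standalone illustration, not driver code): the block
 * size decoding above. The capability field selects 512 << field bytes
 * (0 -> 512, 1 -> 1024, 2 -> 2048); 3 is reserved, which is why values of 3
 * and above fall back to 512 bytes.
 */
static unsigned int sdhci_max_blk_size_bytes(unsigned int field)
{
	return field < 3 ? 512u << field : 512u;
}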
3408 * Maximum block count.
3410 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3415 tasklet_init(&host->finish_tasklet,
3416 sdhci_tasklet_finish, (unsigned long)host);
3418 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3420 init_waitqueue_head(&host->buf_ready_int);
3422 sdhci_init(host, 0);
3424 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3425 IRQF_SHARED, mmc_hostname(mmc), host);
3426 if (ret) {
3427 pr_err("%s: Failed to request IRQ %d: %d\n",
3428 mmc_hostname(mmc), host->irq, ret);
3432 #ifdef CONFIG_MMC_DEBUG
3433 sdhci_dumpregs(host);
3436 #ifdef SDHCI_USE_LEDS_CLASS
3437 snprintf(host->led_name, sizeof(host->led_name),
3438 "%s::", mmc_hostname(mmc));
3439 host->led.name = host->led_name;
3440 host->led.brightness = LED_OFF;
3441 host->led.default_trigger = mmc_hostname(mmc);
3442 host->led.brightness_set = sdhci_led_control;
3444 ret = led_classdev_register(mmc_dev(mmc), &host->led);
3445 if (ret) {
3446 pr_err("%s: Failed to register LED device: %d\n",
3447 mmc_hostname(mmc), ret);
3456 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3457 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3458 (host->flags & SDHCI_USE_ADMA) ?
3459 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3460 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3462 sdhci_enable_card_detection(host);
3466 #ifdef SDHCI_USE_LEDS_CLASS
3467 reset:
3468 sdhci_do_reset(host, SDHCI_RESET_ALL);
3469 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3470 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3471 free_irq(host->irq, host);
3472 #endif
3473 untasklet:
3474 tasklet_kill(&host->finish_tasklet);
3479 EXPORT_SYMBOL_GPL(sdhci_add_host);
3481 void sdhci_remove_host(struct sdhci_host *host, int dead)
3483 struct mmc_host *mmc = host->mmc;
3484 unsigned long flags;
3486 if (dead) {
3487 spin_lock_irqsave(&host->lock, flags);
3489 host->flags |= SDHCI_DEVICE_DEAD;
3492 pr_err("%s: Controller removed during "
3493 " transfer!\n", mmc_hostname(mmc));
3495 host->mrq->cmd->error = -ENOMEDIUM;
3496 tasklet_schedule(&host->finish_tasklet);
3499 spin_unlock_irqrestore(&host->lock, flags);
3502 sdhci_disable_card_detection(host);
3504 mmc_remove_host(mmc);
3506 #ifdef SDHCI_USE_LEDS_CLASS
3507 led_classdev_unregister(&host->led);
3508 #endif
3510 if (!dead)
3511 sdhci_do_reset(host, SDHCI_RESET_ALL);
3513 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3514 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3515 free_irq(host->irq, host);
3517 del_timer_sync(&host->timer);
3519 tasklet_kill(&host->finish_tasklet);
3521 if (!IS_ERR(mmc->supply.vqmmc))
3522 regulator_disable(mmc->supply.vqmmc);
3524 if (host->adma_table)
3525 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3526 host->adma_table, host->adma_addr);
3527 kfree(host->align_buffer);
3529 host->adma_table = NULL;
3530 host->align_buffer = NULL;
3533 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3535 void sdhci_free_host(struct sdhci_host *host)
3537 mmc_free_host(host->mmc);
3540 EXPORT_SYMBOL_GPL(sdhci_free_host);
3542 /*****************************************************************************\
3544 * Driver init/exit *
3546 \*****************************************************************************/
3548 static int __init sdhci_drv_init(void)
3551 ": Secure Digital Host Controller Interface driver\n");
3552 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3557 static void __exit sdhci_drv_exit(void)
3561 module_init(sdhci_drv_init);
3562 module_exit(sdhci_drv_exit);
3564 module_param(debug_quirks, uint, 0444);
3565 module_param(debug_quirks2, uint, 0444);
3567 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3568 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3569 MODULE_LICENSE("GPL");
3571 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3572 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
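/*
 * Editorial usage note: debug_quirks and debug_quirks2 are bitmasks that,
 * when non-zero, override the quirks set up before sdhci_add_host() runs,
 * e.g.
 *
 *   modprobe sdhci debug_quirks=0x10000
 *
 * The value shown is illustrative only; the real bit definitions live in
 * sdhci.h (SDHCI_QUIRK_* and SDHCI_QUIRK2_*).
 */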