// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
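
/*
 * Illustrative usage of the macros above (not part of the original source):
 * both expect a 'struct sdhci_host *host' in scope at the call site, e.g.
 *
 *	DBG("IRQ status 0x%08x\n", intmask);
 *
 * DBG() is compiled out unless debugging is enabled, whereas SDHCI_DUMP()
 * always prints at the error level.
 */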
#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if it is supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * In v4 mode, all supported DMA can use 64-bit addressing if
		 * the controller supports 64-bit system addresses; otherwise
		 * only ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}
static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present state,
	 * see sdhci_set_card_detection(). A card detect interrupt might have
	 * been missed while the host controller was being reset, so trigger a
	 * rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
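
/*
 * Sketch of the chunking above (illustrative, not part of the original
 * source): SDHCI_BUFFER is always read 32 bits at a time, and 'chunk'
 * counts how many of those four bytes are still pending in 'scratch'.
 * For a 6-byte block the accesses are:
 *
 *	scratch = sdhci_readl(host, SDHCI_BUFFER);	// bytes 0-3
 *	// four byte-wise copies, shifting scratch right by 8 each time
 *	scratch = sdhci_readl(host, SDHCI_BUFFER);	// bytes 4-5 (+ padding)
 *	// two more copies; the two padding bytes are discarded
 */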
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
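
/*
 * Note on the bounce path above (added commentary): the whole transfer
 * lands in one physically contiguous bounce buffer, so the scatterlist is
 * never mapped and sg_count is forced to 1, matching the single-segment
 * expectation of the SDMA path in sdhci_prepare_data(). An assumed caller
 * sequence for a pre-mapped request would be:
 *
 *	sdhci_pre_dma_transfer(host, data, COOKIE_PRE_MAPPED);	// from ->pre_req()
 *	...
 *	sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);	// returns cached sg_count
 */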
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
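
/*
 * Layout sketch for the helper above (illustrative, not part of the
 * original source): assuming the ADMA2 format, a 64-bit descriptor for a
 * 512-byte data transfer at DMA address 0x123456000 is filled in as
 *
 *	dma_desc->cmd     = ADMA2_TRAN_VALID;	// attribute: Tran, Valid
 *	dma_desc->len     = 512;
 *	dma_desc->addr_lo = 0x23456000;
 *	dma_desc->addr_hi = 0x00000001;		// only with SDHCI_USE_64_BIT_DMA
 *
 * (all little-endian), after which *desc is advanced by host->desc_sz to
 * point at the next table entry.
 */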
static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		/*
		 * The block layer forces a minimum segment size of PAGE_SIZE,
		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
		 * multiple descriptors, noting that the ADMA table is sized
		 * for 4KiB chunks anyway, so it will be big enough.
		 */
		while (len > host->max_adma) {
			int n = 32 * 1024; /* 32 KiB */

			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
			addr += n;
			len -= n;
		}

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
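
/*
 * Worked example for the conversion above (illustrative): with
 * data->timeout_clks = 1000 and host->clock = 50000000 (50 MHz),
 * val = 1000000 * 1000 / 50000000 = 20 us, which is added on top of the
 * DIV_ROUND_UP(data->timeout_ns, 1000) contribution.
 */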
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
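
/*
 * Worked example (illustrative, assuming host->timeout_clk is in kHz):
 * with timeout_clk = 50000, the minimum hardware timeout is
 * (1 << 13) * 1000 / 50000 ~= 163 us. Reaching a 100 ms target needs ten
 * doublings (163 us << 10 >= 100000 us), so count = 10, which the
 * controller interprets as roughly TMCLK * 2^(count + 13).
 */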
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}
static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block
	 * Count can be supported, in which case the 16-bit block count
	 * register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}
#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc->parent, "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc->parent, "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}
static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * A 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is the upper
		 * layer's responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}
static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}
static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
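
/*
 * Shifting example (illustrative): the controller strips the CRC7 field,
 * so the 120 response bits sit one byte "low" in the four registers. For
 * the first word the loop above computes
 *
 *	resp[0] = (resp[0] << 8) | (resp[1] >> 24);
 *
 * i.e. each word takes its low byte from the top byte of the next word,
 * and resp[3] is simply shifted left by 8.
 */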
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {
		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
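
/*
 * Worked example for the divided-clock path (illustrative): with
 * host->max_clk = 200 MHz, no clock multiplier and a 50 MHz request on a
 * v3.00 host, the even-divisor loop stops at div = 4 (200 / 4 <= 50), so
 * real_div = 4 and *actual_clock = 50 MHz, while div >> 1 = 2 is what gets
 * programmed via SDHCI_DIVIDER_SHIFT/SDHCI_DIVIDER_HI_SHIFT.
 */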
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
		/*
		 * 3.4 ~ 3.6V are valid only for those platforms where it's
		 * known that the voltage range is supported by hardware.
		 */
		case MMC_VDD_34_35:
		case MMC_VDD_35_36:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*
 * Some controllers need to configure a valid bus voltage on their power
 * register regardless of whether an external regulator is taking care of power
 * supply. This helper function takes care of it if set as the controller's
 * sdhci_ops.set_power callback.
 */
void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
				     unsigned char mode,
				     unsigned short vdd)
{
	if (!IS_ERR(host->mmc->supply.vmmc)) {
		struct mmc_host *mmc = host->mmc;

		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	}
	sdhci_set_power_noreg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;

	if (!sdhci_send_command_retry(host, cmd, flags))
		goto out_finish;

	spin_unlock_irqrestore(&host->lock, flags);

	return;

out_finish:
	sdhci_finish_mrq(host, mrq);
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (sdhci_present_error(host, mrq->cmd, true)) {
		sdhci_finish_mrq(host, mrq);
		goto out_finish;
	}

	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;

	/*
	 * The HSQ may send a command in interrupt context without polling
	 * the busy signaling, which means we should return BUSY if the
	 * controller has not released the inhibit bits, so that the HSQ can
	 * try to send the request again in non-atomic context. So we should
	 * not finish this request here.
	 */
	if (!sdhci_send_command(host, cmd))
		ret = -EBUSY;
	else
		sdhci_led_activate(host);

out_finish:
	spin_unlock_irqrestore(&host->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_request_atomic);
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
						  preset);
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
2417 static int sdhci_get_cd(struct mmc_host *mmc)
2419 struct sdhci_host *host = mmc_priv(mmc);
2420 int gpio_cd = mmc_gpio_get_cd(mmc);
2422 if (host->flags & SDHCI_DEVICE_DEAD)
2423 return 0;
2425 /* If nonremovable, assume that the card is always present. */
2426 if (!mmc_card_is_removable(host->mmc))
2427 return 1;
2429 /*
2430 * Try slot gpio detect; if defined, it takes precedence
2431 * over built-in controller functionality.
2432 */
2433 if (gpio_cd >= 0)
2434 return !!gpio_cd;
2436 /* If polling, assume that the card is always present. */
2437 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2438 return 1;
2440 /* Host native card detect */
2441 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2444 static int sdhci_check_ro(struct sdhci_host *host)
2446 unsigned long flags;
2447 int is_readonly;
2449 spin_lock_irqsave(&host->lock, flags);
2451 if (host->flags & SDHCI_DEVICE_DEAD)
2452 is_readonly = 0;
2453 else if (host->ops->get_ro)
2454 is_readonly = host->ops->get_ro(host);
2455 else if (mmc_can_gpio_ro(host->mmc))
2456 is_readonly = mmc_gpio_get_ro(host->mmc);
2457 else
2458 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2459 & SDHCI_WRITE_PROTECT);
2461 spin_unlock_irqrestore(&host->lock, flags);
2463 /* This quirk needs to be replaced by a callback-function later */
2464 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2465 !is_readonly : is_readonly;
2468 #define SAMPLE_COUNT 5
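/*
 * With SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect switch is
 * sampled SAMPLE_COUNT times below and the slot is reported read-only
 * only if a strict majority of the samples agree - a simple software
 * debounce for flaky switches.
 */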
2470 static int sdhci_get_ro(struct mmc_host *mmc)
2472 struct sdhci_host *host = mmc_priv(mmc);
2473 int i, ro_count;
2475 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2476 return sdhci_check_ro(host);
2478 ro_count = 0;
2479 for (i = 0; i < SAMPLE_COUNT; i++) {
2480 if (sdhci_check_ro(host)) {
2481 if (++ro_count > SAMPLE_COUNT / 2)
2482 return 1;
2483 }
2484 msleep(30);
2485 }
2486 return 0;
2487 }
2489 static void sdhci_hw_reset(struct mmc_host *mmc)
2491 struct sdhci_host *host = mmc_priv(mmc);
2493 if (host->ops && host->ops->hw_reset)
2494 host->ops->hw_reset(host);
2497 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2499 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2500 if (enable)
2501 host->ier |= SDHCI_INT_CARD_INT;
2502 else
2503 host->ier &= ~SDHCI_INT_CARD_INT;
2505 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2506 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2510 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2512 struct sdhci_host *host = mmc_priv(mmc);
2513 unsigned long flags;
2515 if (enable)
2516 pm_runtime_get_noresume(host->mmc->parent);
2518 spin_lock_irqsave(&host->lock, flags);
2519 sdhci_enable_sdio_irq_nolock(host, enable);
2520 spin_unlock_irqrestore(&host->lock, flags);
2522 if (!enable)
2523 pm_runtime_put_noidle(host->mmc->parent);
2524 }
2525 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2527 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2529 struct sdhci_host *host = mmc_priv(mmc);
2530 unsigned long flags;
2532 spin_lock_irqsave(&host->lock, flags);
2533 sdhci_enable_sdio_irq_nolock(host, true);
2534 spin_unlock_irqrestore(&host->lock, flags);
2537 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2538 struct mmc_ios *ios)
2540 struct sdhci_host *host = mmc_priv(mmc);
2541 u16 ctrl;
2542 int ret;
2544 /*
2545 * Signal Voltage Switching is only applicable for Host Controllers
2546 * v3.00 and above.
2547 */
2548 if (host->version < SDHCI_SPEC_300)
2549 return 0;
2551 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2553 switch (ios->signal_voltage) {
2554 case MMC_SIGNAL_VOLTAGE_330:
2555 if (!(host->flags & SDHCI_SIGNALING_330))
2556 return -EINVAL;
2557 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2558 ctrl &= ~SDHCI_CTRL_VDD_180;
2559 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2561 if (!IS_ERR(mmc->supply.vqmmc)) {
2562 ret = mmc_regulator_set_vqmmc(mmc, ios);
2563 if (ret < 0) {
2564 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2565 mmc_hostname(mmc));
2566 return -EIO;
2567 }
2568 }
2570 usleep_range(5000, 5500);
2572 /* 3.3V regulator output should be stable within 5 ms */
2573 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2574 if (!(ctrl & SDHCI_CTRL_VDD_180))
2575 return 0;
2577 pr_warn("%s: 3.3V regulator output did not become stable\n",
2578 mmc_hostname(mmc));
2580 return -EAGAIN;
2581 case MMC_SIGNAL_VOLTAGE_180:
2582 if (!(host->flags & SDHCI_SIGNALING_180))
2583 return -EINVAL;
2584 if (!IS_ERR(mmc->supply.vqmmc)) {
2585 ret = mmc_regulator_set_vqmmc(mmc, ios);
2586 if (ret < 0) {
2587 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2588 mmc_hostname(mmc));
2589 return -EIO;
2590 }
2591 }
2593 /*
2594 * Enable 1.8V Signal Enable in the Host Control2
2595 * register.
2596 */
2597 ctrl |= SDHCI_CTRL_VDD_180;
2598 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2600 /* Some controllers need to do more when switching */
2601 if (host->ops->voltage_switch)
2602 host->ops->voltage_switch(host);
2604 /* 1.8V regulator output should be stable within 5 ms */
2605 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2606 if (ctrl & SDHCI_CTRL_VDD_180)
2607 return 0;
2609 pr_warn("%s: 1.8V regulator output did not become stable\n",
2610 mmc_hostname(mmc));
2612 return -EAGAIN;
2613 case MMC_SIGNAL_VOLTAGE_120:
2614 if (!(host->flags & SDHCI_SIGNALING_120))
2615 return -EINVAL;
2616 if (!IS_ERR(mmc->supply.vqmmc)) {
2617 ret = mmc_regulator_set_vqmmc(mmc, ios);
2618 if (ret < 0) {
2619 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2620 mmc_hostname(mmc));
2621 return -EIO;
2622 }
2623 }
2624 return 0;
2625 default:
2626 /* No signal voltage switch required */
2627 return 0;
2628 }
2629 }
2630 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2632 static int sdhci_card_busy(struct mmc_host *mmc)
2634 struct sdhci_host *host = mmc_priv(mmc);
2635 u32 present_state;
2637 /* Check whether DAT[0] is 0 */
2638 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2640 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2643 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2645 struct sdhci_host *host = mmc_priv(mmc);
2646 unsigned long flags;
2648 spin_lock_irqsave(&host->lock, flags);
2649 host->flags |= SDHCI_HS400_TUNING;
2650 spin_unlock_irqrestore(&host->lock, flags);
2652 return 0;
2653 }
2655 void sdhci_start_tuning(struct sdhci_host *host)
2659 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2660 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2661 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2662 ctrl |= SDHCI_CTRL_TUNED_CLK;
2663 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2666 * As per the Host Controller spec v3.00, tuning command
2667 * generates Buffer Read Ready interrupt, so enable that.
2669 * Note: The spec clearly says that when tuning sequence
2670 * is being performed, the controller does not generate
2671 * interrupts other than Buffer Read Ready interrupt. But
2672 * to make sure we don't hit a controller bug, we _only_
2673 * enable Buffer Read Ready interrupt here.
2675 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2676 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2678 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2680 void sdhci_end_tuning(struct sdhci_host *host)
2682 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2683 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2685 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2687 void sdhci_reset_tuning(struct sdhci_host *host)
2691 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2692 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2693 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2694 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2696 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2698 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2700 sdhci_reset_tuning(host);
2702 sdhci_do_reset(host, SDHCI_RESET_CMD);
2703 sdhci_do_reset(host, SDHCI_RESET_DATA);
2705 sdhci_end_tuning(host);
2707 mmc_abort_tuning(host->mmc, opcode);
2709 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2711 /*
2712 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2713 * tuning command does not have a data payload (or rather the hardware does it
2714 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2715 * interrupt setup is different to other commands and there is no timeout
2716 * interrupt so special handling is needed.
2717 */
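/*
 * For reference: MMC_SEND_TUNING_BLOCK is CMD19 (SD UHS-I) and
 * MMC_SEND_TUNING_BLOCK_HS200 is CMD21 (eMMC HS200/HS400).
 */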
2718 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2720 struct mmc_host *mmc = host->mmc;
2721 struct mmc_command cmd = {};
2722 struct mmc_request mrq = {};
2723 unsigned long flags;
2724 u32 b = host->sdma_boundary;
2726 spin_lock_irqsave(&host->lock, flags);
2728 cmd.opcode = opcode;
2729 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2730 cmd.mrq = &mrq;
2732 mrq.cmd = &cmd;
2733 /*
2734 * In response to CMD19, the card sends 64 bytes of tuning
2735 * block to the Host Controller. So we set the block size
2736 * to 64 here.
2737 */
2738 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2739 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2740 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2741 else
2742 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2744 /*
2745 * The tuning block is sent by the card to the host controller.
2746 * So we set the TRNS_READ bit in the Transfer Mode register.
2747 * This also takes care of setting DMA Enable and Multi Block
2748 * Select in the same register to 0.
2749 */
2750 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2752 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2753 spin_unlock_irqrestore(&host->lock, flags);
2754 host->tuning_done = 0;
2755 return;
2756 }
2758 host->cmd = NULL;
2760 sdhci_del_timer(host, &mrq);
2762 host->tuning_done = 0;
2764 spin_unlock_irqrestore(&host->lock, flags);
2766 /* Wait for Buffer Read Ready interrupt */
2767 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2768 msecs_to_jiffies(50));
2771 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2773 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2774 int i;
2777 /*
2778 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
2779 * of loops reaches tuning loop count.
2780 */
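/* host->tuning_loop_count defaults to MAX_TUNING_LOOP (40); see sdhci_alloc_host(). */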
2781 for (i = 0; i < host->tuning_loop_count; i++) {
2782 u16 ctrl;
2784 sdhci_send_tuning(host, opcode);
2786 if (!host->tuning_done) {
2787 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2788 mmc_hostname(host->mmc));
2789 sdhci_abort_tuning(host, opcode);
2790 return -ETIMEDOUT;
2791 }
2793 /* Spec does not require a delay between tuning cycles */
2794 if (host->tuning_delay > 0)
2795 mdelay(host->tuning_delay);
2797 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2798 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2799 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2800 return 0; /* Success! */
2801 break;
2802 }
2803 }
2806 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2807 mmc_hostname(host->mmc));
2808 sdhci_reset_tuning(host);
2809 return -EAGAIN;
2810 }
2812 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2814 struct sdhci_host *host = mmc_priv(mmc);
2815 int err = 0;
2816 unsigned int tuning_count = 0;
2817 bool hs400_tuning;
2819 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2821 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2822 tuning_count = host->tuning_count;
2825 * The Host Controller needs tuning in case of SDR104 and DDR50
2826 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2827 * the Capabilities register.
2828 * If the Host Controller supports the HS200 mode then the
2829 * tuning function has to be executed.
2831 switch (host->timing) {
2832 /* HS400 tuning is done in HS200 mode */
2833 case MMC_TIMING_MMC_HS400:
2834 err = -EINVAL;
2835 goto out;
2837 case MMC_TIMING_MMC_HS200:
2838 /*
2839 * Periodic re-tuning for HS400 is not expected to be needed, so
2840 * disable it here.
2841 */
2842 if (hs400_tuning)
2843 tuning_count = 0;
2844 break;
2846 case MMC_TIMING_UHS_SDR104:
2847 case MMC_TIMING_UHS_DDR50:
2848 break;
2850 case MMC_TIMING_UHS_SDR50:
2851 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2852 break;
2853 fallthrough;
2855 default:
2856 goto out;
2857 }
2859 if (host->ops->platform_execute_tuning) {
2860 err = host->ops->platform_execute_tuning(host, opcode);
2861 goto out;
2862 }
2864 host->mmc->retune_period = tuning_count;
2866 if (host->tuning_delay < 0)
2867 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2869 sdhci_start_tuning(host);
2871 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2873 sdhci_end_tuning(host);
2874 out:
2875 host->flags &= ~SDHCI_HS400_TUNING;
2877 return err;
2878 }
2879 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2881 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2883 /* Host Controller v3.00 defines preset value registers */
2884 if (host->version < SDHCI_SPEC_300)
2885 return;
2887 /*
2888 * We only enable or disable Preset Value if they are not already
2889 * enabled or disabled respectively. Otherwise, we bail out.
2890 */
2891 if (host->preset_enabled != enable) {
2892 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2894 if (enable)
2895 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2896 else
2897 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2899 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2901 if (enable)
2902 host->flags |= SDHCI_PV_ENABLED;
2903 else
2904 host->flags &= ~SDHCI_PV_ENABLED;
2906 host->preset_enabled = enable;
2910 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2913 struct sdhci_host *host = mmc_priv(mmc);
2914 struct mmc_data *data = mrq->data;
2916 if (data->host_cookie != COOKIE_UNMAPPED)
2917 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2918 mmc_get_dma_dir(data));
2920 data->host_cookie = COOKIE_UNMAPPED;
2923 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2925 struct sdhci_host *host = mmc_priv(mmc);
2927 mrq->data->host_cookie = COOKIE_UNMAPPED;
2930 * No pre-mapping in the pre hook if we're using the bounce buffer,
2931 * for that we would need two bounce buffers since one buffer is
2932 * in flight when this is getting called.
2934 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2935 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2938 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2940 if (host->data_cmd) {
2941 host->data_cmd->error = err;
2942 sdhci_finish_mrq(host, host->data_cmd->mrq);
2943 }
2945 if (host->cmd) {
2946 host->cmd->error = err;
2947 sdhci_finish_mrq(host, host->cmd->mrq);
2948 }
2949 }
2951 static void sdhci_card_event(struct mmc_host *mmc)
2953 struct sdhci_host *host = mmc_priv(mmc);
2954 unsigned long flags;
2957 /* First check if client has provided their own card event */
2958 if (host->ops->card_event)
2959 host->ops->card_event(host);
2961 present = mmc->ops->get_cd(mmc);
2963 spin_lock_irqsave(&host->lock, flags);
2965 /* Check sdhci_has_requests() first in case we are runtime suspended */
2966 if (sdhci_has_requests(host) && !present) {
2967 pr_err("%s: Card removed during transfer!\n",
2968 mmc_hostname(host->mmc));
2969 pr_err("%s: Resetting controller.\n",
2970 mmc_hostname(host->mmc));
2972 sdhci_do_reset(host, SDHCI_RESET_CMD);
2973 sdhci_do_reset(host, SDHCI_RESET_DATA);
2975 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2978 spin_unlock_irqrestore(&host->lock, flags);
2981 static const struct mmc_host_ops sdhci_ops = {
2982 .request = sdhci_request,
2983 .post_req = sdhci_post_req,
2984 .pre_req = sdhci_pre_req,
2985 .set_ios = sdhci_set_ios,
2986 .get_cd = sdhci_get_cd,
2987 .get_ro = sdhci_get_ro,
2988 .hw_reset = sdhci_hw_reset,
2989 .enable_sdio_irq = sdhci_enable_sdio_irq,
2990 .ack_sdio_irq = sdhci_ack_sdio_irq,
2991 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2992 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2993 .execute_tuning = sdhci_execute_tuning,
2994 .card_event = sdhci_card_event,
2995 .card_busy = sdhci_card_busy,
2998 /*****************************************************************************\
3002 \*****************************************************************************/
3004 static bool sdhci_request_done(struct sdhci_host *host)
3006 unsigned long flags;
3007 struct mmc_request *mrq;
3008 int i;
3010 spin_lock_irqsave(&host->lock, flags);
3012 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3013 mrq = host->mrqs_done[i];
3014 if (mrq)
3015 break;
3016 }
3018 if (!mrq) {
3019 spin_unlock_irqrestore(&host->lock, flags);
3020 return true;
3021 }
3023 /*
3024 * The controller needs a reset of internal state machines
3025 * upon error conditions.
3026 */
3027 if (sdhci_needs_reset(host, mrq)) {
3028 /*
3029 * Do not finish until command and data lines are available for
3030 * reset. Note there can only be one other mrq, so it cannot
3031 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3032 * would both be null.
3033 */
3034 if (host->cmd || host->data_cmd) {
3035 spin_unlock_irqrestore(&host->lock, flags);
3036 return true;
3037 }
3039 /* Some controllers need this kick or reset won't work here */
3040 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3041 /* This is to force an update */
3042 host->ops->set_clock(host, host->clock);
3044 /*
3045 * Spec says we should do both at the same time, but Ricoh
3046 * controllers do not like that.
3047 */
3048 sdhci_do_reset(host, SDHCI_RESET_CMD);
3049 sdhci_do_reset(host, SDHCI_RESET_DATA);
3051 host->pending_reset = false;
3054 /*
3055 * Always unmap the data buffers if they were mapped by
3056 * sdhci_prepare_data() whenever we finish with a request.
3057 * This avoids leaking DMA mappings on error.
3058 */
3059 if (host->flags & SDHCI_REQ_USE_DMA) {
3060 struct mmc_data *data = mrq->data;
3062 if (host->use_external_dma && data &&
3063 (mrq->cmd->error || data->error)) {
3064 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3066 host->mrqs_done[i] = NULL;
3067 spin_unlock_irqrestore(&host->lock, flags);
3068 dmaengine_terminate_sync(chan);
3069 spin_lock_irqsave(&host->lock, flags);
3070 sdhci_set_mrq_done(host, mrq);
3073 if (data && data->host_cookie == COOKIE_MAPPED) {
3074 if (host->bounce_buffer) {
3075 /*
3076 * On reads, copy the bounced data into the
3077 * sglist
3078 */
3079 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3080 unsigned int length = data->bytes_xfered;
3082 if (length > host->bounce_buffer_size) {
3083 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3084 mmc_hostname(host->mmc),
3085 host->bounce_buffer_size,
3086 data->bytes_xfered);
3087 /* Cap it down and continue */
3088 length = host->bounce_buffer_size;
3089 }
3090 dma_sync_single_for_cpu(
3091 mmc_dev(host->mmc),
3092 host->bounce_addr,
3093 host->bounce_buffer_size,
3094 DMA_FROM_DEVICE);
3095 sg_copy_from_buffer(data->sg,
3096 data->sg_len,
3097 host->bounce_buffer,
3098 length);
3099 } else {
3100 /* No copying, just switch ownership */
3101 dma_sync_single_for_cpu(
3102 mmc_dev(host->mmc),
3103 host->bounce_addr,
3104 host->bounce_buffer_size,
3105 mmc_get_dma_dir(data));
3106 }
3107 } else {
3108 /* Unmap the raw data */
3109 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3110 data->sg_len,
3111 mmc_get_dma_dir(data));
3112 }
3113 data->host_cookie = COOKIE_UNMAPPED;
3117 host->mrqs_done[i] = NULL;
3119 spin_unlock_irqrestore(&host->lock, flags);
3121 if (host->ops->request_done)
3122 host->ops->request_done(host, mrq);
3123 else
3124 mmc_request_done(host->mmc, mrq);
3126 return false;
3127 }
3129 static void sdhci_complete_work(struct work_struct *work)
3131 struct sdhci_host *host = container_of(work, struct sdhci_host,
3132 complete_work);
3134 while (!sdhci_request_done(host))
3135 ;
3136 }
3138 static void sdhci_timeout_timer(struct timer_list *t)
3140 struct sdhci_host *host;
3141 unsigned long flags;
3143 host = from_timer(host, t, timer);
3145 spin_lock_irqsave(&host->lock, flags);
3147 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3148 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3149 mmc_hostname(host->mmc));
3150 sdhci_dumpregs(host);
3152 host->cmd->error = -ETIMEDOUT;
3153 sdhci_finish_mrq(host, host->cmd->mrq);
3156 spin_unlock_irqrestore(&host->lock, flags);
3159 static void sdhci_timeout_data_timer(struct timer_list *t)
3161 struct sdhci_host *host;
3162 unsigned long flags;
3164 host = from_timer(host, t, data_timer);
3166 spin_lock_irqsave(&host->lock, flags);
3168 if (host->data || host->data_cmd ||
3169 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3170 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3171 mmc_hostname(host->mmc));
3172 sdhci_dumpregs(host);
3174 if (host->data) {
3175 host->data->error = -ETIMEDOUT;
3176 __sdhci_finish_data(host, true);
3177 queue_work(host->complete_wq, &host->complete_work);
3178 } else if (host->data_cmd) {
3179 host->data_cmd->error = -ETIMEDOUT;
3180 sdhci_finish_mrq(host, host->data_cmd->mrq);
3181 } else {
3182 host->cmd->error = -ETIMEDOUT;
3183 sdhci_finish_mrq(host, host->cmd->mrq);
3184 }
3185 }
3187 spin_unlock_irqrestore(&host->lock, flags);
3190 /*****************************************************************************\
3192 * Interrupt handling *
3194 \*****************************************************************************/
3196 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3198 /* Handle auto-CMD12 error */
3199 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3200 struct mmc_request *mrq = host->data_cmd->mrq;
3201 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3202 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3203 SDHCI_INT_DATA_TIMEOUT :
3204 SDHCI_INT_DATA_CRC;
3206 /* Treat auto-CMD12 error the same as data error */
3207 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3208 *intmask_p |= data_err_bit;
3209 return;
3210 }
3211 }
3213 if (!host->cmd) {
3214 /*
3215 * SDHCI recovers from errors by resetting the cmd and data
3216 * circuits. Until that is done, there very well might be more
3217 * interrupts, so ignore them in that case.
3218 */
3219 if (host->pending_reset)
3220 return;
3221 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3222 mmc_hostname(host->mmc), (unsigned)intmask);
3223 sdhci_dumpregs(host);
3224 return;
3225 }
3227 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3228 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3229 if (intmask & SDHCI_INT_TIMEOUT)
3230 host->cmd->error = -ETIMEDOUT;
3231 else
3232 host->cmd->error = -EILSEQ;
3234 /* Treat data command CRC error the same as data CRC error */
3235 if (host->cmd->data &&
3236 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3237 SDHCI_INT_CRC) {
3238 host->cmd = NULL;
3239 *intmask_p |= SDHCI_INT_DATA_CRC;
3240 return;
3241 }
3243 __sdhci_finish_mrq(host, host->cmd->mrq);
3244 return;
3245 }
3247 /* Handle auto-CMD23 error */
3248 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3249 struct mmc_request *mrq = host->cmd->mrq;
3250 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3251 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3252 -ETIMEDOUT :
3253 -EILSEQ;
3255 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3256 mrq->sbc->error = err;
3257 __sdhci_finish_mrq(host, mrq);
3258 return;
3259 }
3260 }
3262 if (intmask & SDHCI_INT_RESPONSE)
3263 sdhci_finish_command(host);
3266 static void sdhci_adma_show_error(struct sdhci_host *host)
3268 void *desc = host->adma_table;
3269 dma_addr_t dma = host->adma_addr;
3271 sdhci_dumpregs(host);
3273 while (true) {
3274 struct sdhci_adma2_64_desc *dma_desc = desc;
3276 if (host->flags & SDHCI_USE_64_BIT_DMA)
3277 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3278 (unsigned long long)dma,
3279 le32_to_cpu(dma_desc->addr_hi),
3280 le32_to_cpu(dma_desc->addr_lo),
3281 le16_to_cpu(dma_desc->len),
3282 le16_to_cpu(dma_desc->cmd));
3283 else
3284 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3285 (unsigned long long)dma,
3286 le32_to_cpu(dma_desc->addr_lo),
3287 le16_to_cpu(dma_desc->len),
3288 le16_to_cpu(dma_desc->cmd));
3290 desc += host->desc_sz;
3291 dma += host->desc_sz;
3293 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3298 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3302 /* CMD19 generates _only_ Buffer Read Ready interrupt */
3303 if (intmask & SDHCI_INT_DATA_AVAIL) {
3304 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3305 if (command == MMC_SEND_TUNING_BLOCK ||
3306 command == MMC_SEND_TUNING_BLOCK_HS200) {
3307 host->tuning_done = 1;
3308 wake_up(&host->buf_ready_int);
3309 return;
3310 }
3311 }
3313 if (!host->data) {
3314 struct mmc_command *data_cmd = host->data_cmd;
3317 * The "data complete" interrupt is also used to
3318 * indicate that a busy state has ended. See comment
3319 * above in sdhci_cmd_irq().
3321 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3322 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3323 host->data_cmd = NULL;
3324 data_cmd->error = -ETIMEDOUT;
3325 __sdhci_finish_mrq(host, data_cmd->mrq);
3326 return;
3327 }
3328 if (intmask & SDHCI_INT_DATA_END) {
3329 host->data_cmd = NULL;
3331 * Some cards handle busy-end interrupt
3332 * before the command completed, so make
3333 * sure we do things in the proper order.
3335 if (host->cmd == data_cmd)
3336 return;
3338 __sdhci_finish_mrq(host, data_cmd->mrq);
3339 return;
3340 }
3341 }
3342 }
3343 /*
3344 * SDHCI recovers from errors by resetting the cmd and data
3345 * circuits. Until that is done, there very well might be more
3346 * interrupts, so ignore them in that case.
3347 */
3348 if (host->pending_reset)
3349 return;
3351 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3352 mmc_hostname(host->mmc), (unsigned)intmask);
3353 sdhci_dumpregs(host);
3354 return;
3355 }
3358 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3359 host->data->error = -ETIMEDOUT;
3360 else if (intmask & SDHCI_INT_DATA_END_BIT)
3361 host->data->error = -EILSEQ;
3362 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3363 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3364 != MMC_BUS_TEST_R)
3365 host->data->error = -EILSEQ;
3366 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3367 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3368 intmask);
3369 sdhci_adma_show_error(host);
3370 host->data->error = -EIO;
3371 if (host->ops->adma_workaround)
3372 host->ops->adma_workaround(host, intmask);
3375 if (host->data->error)
3376 sdhci_finish_data(host);
3377 else {
3378 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3379 sdhci_transfer_pio(host);
3381 /*
3382 * We currently don't do anything fancy with DMA
3383 * boundaries, but as we can't disable the feature
3384 * we need to at least restart the transfer.
3386 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3387 * should return a valid address to continue from, but as
3388 * some controllers are faulty, don't trust them.
3389 */
3390 if (intmask & SDHCI_INT_DMA_END) {
3391 dma_addr_t dmastart, dmanow;
3393 dmastart = sdhci_sdma_address(host);
3394 dmanow = dmastart + host->data->bytes_xfered;
3395 /*
3396 * Force update to the next DMA block boundary.
3397 */
3398 dmanow = (dmanow &
3399 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3400 SDHCI_DEFAULT_BOUNDARY_SIZE;
3401 host->data->bytes_xfered = dmanow - dmastart;
3402 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3403 &dmastart, host->data->bytes_xfered, &dmanow);
3404 sdhci_set_sdma_addr(host, dmanow);
3405 }
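/*
 * Worked example, assuming the default 512 KiB boundary: if the
 * transfer stopped with dmanow at 0x1237ff00, the mask-and-add above
 * yields (0x1237ff00 & ~0x7ffff) + 0x80000 = 0x12380000, i.e. the
 * next 512 KiB-aligned address.
 */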
3407 if (intmask & SDHCI_INT_DATA_END) {
3408 if (host->cmd == host->data_cmd) {
3409 /*
3410 * Data managed to finish before the
3411 * command completed. Make sure we do
3412 * things in the proper order.
3413 */
3414 host->data_early = 1;
3416 sdhci_finish_data(host);
3422 static inline bool sdhci_defer_done(struct sdhci_host *host,
3423 struct mmc_request *mrq)
3425 struct mmc_data *data = mrq->data;
3427 return host->pending_reset || host->always_defer_done ||
3428 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3429 data->host_cookie == COOKIE_MAPPED);
3432 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3434 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3435 irqreturn_t result = IRQ_NONE;
3436 struct sdhci_host *host = dev_id;
3437 u32 intmask, mask, unexpected = 0;
3438 int max_loops = 16;
3439 int i;
3441 spin_lock(&host->lock);
3443 if (host->runtime_suspended) {
3444 spin_unlock(&host->lock);
3445 return IRQ_NONE;
3446 }
3448 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3449 if (!intmask || intmask == 0xffffffff) {
3450 result = IRQ_NONE;
3451 goto out;
3452 }
3454 do {
3455 DBG("IRQ status 0x%08x\n", intmask);
3457 if (host->ops->irq) {
3458 intmask = host->ops->irq(host, intmask);
3463 /* Clear selected interrupts. */
3464 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3465 SDHCI_INT_BUS_POWER);
3466 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3468 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3469 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3470 SDHCI_CARD_PRESENT;
3472 /*
3473 * There is an observation on i.MX eSDHC: the INSERT
3474 * bit will be immediately set again when it gets
3475 * cleared, if a card is inserted. We have to mask
3476 * the irq to prevent an interrupt storm which would
3477 * freeze the system. And the REMOVE bit gets the
3478 * same situation.
3479 *
3480 * More testing is needed here to ensure it works
3481 * for other platforms though.
3482 */
3483 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3484 SDHCI_INT_CARD_REMOVE);
3485 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3486 SDHCI_INT_CARD_INSERT;
3487 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3488 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3490 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3491 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3493 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3494 SDHCI_INT_CARD_REMOVE);
3495 result = IRQ_WAKE_THREAD;
3498 if (intmask & SDHCI_INT_CMD_MASK)
3499 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3501 if (intmask & SDHCI_INT_DATA_MASK)
3502 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3504 if (intmask & SDHCI_INT_BUS_POWER)
3505 pr_err("%s: Card is consuming too much power!\n",
3506 mmc_hostname(host->mmc));
3508 if (intmask & SDHCI_INT_RETUNE)
3509 mmc_retune_needed(host->mmc);
3511 if ((intmask & SDHCI_INT_CARD_INT) &&
3512 (host->ier & SDHCI_INT_CARD_INT)) {
3513 sdhci_enable_sdio_irq_nolock(host, false);
3514 sdio_signal_irq(host->mmc);
3517 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3518 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3519 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3520 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3523 unexpected |= intmask;
3524 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3527 if (result == IRQ_NONE)
3528 result = IRQ_HANDLED;
3530 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3531 } while (intmask && --max_loops);
3533 /* Determine if mrqs can be completed immediately */
3534 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3535 struct mmc_request *mrq = host->mrqs_done[i];
3537 if (!mrq)
3538 continue;
3540 if (sdhci_defer_done(host, mrq)) {
3541 result = IRQ_WAKE_THREAD;
3542 } else {
3543 mrqs_done[i] = mrq;
3544 host->mrqs_done[i] = NULL;
3545 }
3546 }
3547 out:
3548 if (host->deferred_cmd)
3549 result = IRQ_WAKE_THREAD;
3551 spin_unlock(&host->lock);
3553 /* Process mrqs ready for immediate completion */
3554 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3555 if (!mrqs_done[i])
3556 continue;
3558 if (host->ops->request_done)
3559 host->ops->request_done(host, mrqs_done[i]);
3560 else
3561 mmc_request_done(host->mmc, mrqs_done[i]);
3562 }
3564 if (unexpected) {
3565 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3566 mmc_hostname(host->mmc), unexpected);
3567 sdhci_dumpregs(host);
3568 }
3570 return result;
3571 }
3573 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3575 struct sdhci_host *host = dev_id;
3576 struct mmc_command *cmd;
3577 unsigned long flags;
3578 u32 isr;
3580 while (!sdhci_request_done(host))
3581 ;
3583 spin_lock_irqsave(&host->lock, flags);
3585 isr = host->thread_isr;
3586 host->thread_isr = 0;
3588 cmd = host->deferred_cmd;
3589 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3590 sdhci_finish_mrq(host, cmd->mrq);
3592 spin_unlock_irqrestore(&host->lock, flags);
3594 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3595 struct mmc_host *mmc = host->mmc;
3597 mmc->ops->card_event(mmc);
3598 mmc_detect_change(mmc, msecs_to_jiffies(200));
3599 }
3601 return IRQ_HANDLED;
3602 }
3604 /*****************************************************************************\
3608 \*****************************************************************************/
3610 #ifdef CONFIG_PM
3612 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3614 return mmc_card_is_removable(host->mmc) &&
3615 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3616 !mmc_can_gpio_cd(host->mmc);
3620 * To enable wakeup events, the corresponding events have to be enabled in
3621 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3622 * Table' in the SD Host Controller Standard Specification.
3623 * It is useless to restore SDHCI_INT_ENABLE state in
3624 * sdhci_disable_irq_wakeups() since it will be set by
3625 * sdhci_enable_card_detection() or sdhci_init().
3627 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3629 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3630 SDHCI_WAKE_ON_INT;
3631 u32 irq_val = 0;
3632 u8 wake_val = 0;
3633 u8 val;
3635 if (sdhci_cd_irq_can_wakeup(host)) {
3636 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3637 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3638 }
3640 if (mmc_card_wake_sdio_irq(host->mmc)) {
3641 wake_val |= SDHCI_WAKE_ON_INT;
3642 irq_val |= SDHCI_INT_CARD_INT;
3643 }
3645 if (!wake_val)
3646 return false;
3648 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3649 val &= ~mask;
3650 val |= wake_val;
3651 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3653 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3655 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3657 return host->irq_wake_enabled;
3660 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3663 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3664 | SDHCI_WAKE_ON_INT;
3666 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3667 val &= ~mask;
3668 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3670 disable_irq_wake(host->irq);
3672 host->irq_wake_enabled = false;
3675 int sdhci_suspend_host(struct sdhci_host *host)
3677 sdhci_disable_card_detection(host);
3679 mmc_retune_timer_stop(host->mmc);
3681 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3682 !sdhci_enable_irq_wakeups(host)) {
3683 host->ier = 0;
3684 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3685 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3686 free_irq(host->irq, host);
3687 }
3689 return 0;
3690 }
3692 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3694 int sdhci_resume_host(struct sdhci_host *host)
3696 struct mmc_host *mmc = host->mmc;
3697 int ret = 0;
3699 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3700 if (host->ops->enable_dma)
3701 host->ops->enable_dma(host);
3704 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3705 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3706 /* Card keeps power but host controller does not */
3707 sdhci_init(host, 0);
3708 host->pwr = 0;
3709 host->clock = 0;
3710 mmc->ops->set_ios(mmc, &mmc->ios);
3711 } else {
3712 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3713 }
3715 if (host->irq_wake_enabled) {
3716 sdhci_disable_irq_wakeups(host);
3717 } else {
3718 ret = request_threaded_irq(host->irq, sdhci_irq,
3719 sdhci_thread_irq, IRQF_SHARED,
3720 mmc_hostname(host->mmc), host);
3721 if (ret)
3722 return ret;
3723 }
3725 sdhci_enable_card_detection(host);
3727 return ret;
3728 }
3730 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3732 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3734 unsigned long flags;
3736 mmc_retune_timer_stop(host->mmc);
3738 spin_lock_irqsave(&host->lock, flags);
3739 host->ier &= SDHCI_INT_CARD_INT;
3740 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3741 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3742 spin_unlock_irqrestore(&host->lock, flags);
3744 synchronize_hardirq(host->irq);
3746 spin_lock_irqsave(&host->lock, flags);
3747 host->runtime_suspended = true;
3748 spin_unlock_irqrestore(&host->lock, flags);
3750 return 0;
3751 }
3752 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3754 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3756 struct mmc_host *mmc = host->mmc;
3757 unsigned long flags;
3758 int host_flags = host->flags;
3760 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3761 if (host->ops->enable_dma)
3762 host->ops->enable_dma(host);
3765 sdhci_init(host, soft_reset);
3767 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3768 mmc->ios.power_mode != MMC_POWER_OFF) {
3769 /* Force clock and power re-program */
3770 host->pwr = 0;
3771 host->clock = 0;
3772 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3773 mmc->ops->set_ios(mmc, &mmc->ios);
3775 if ((host_flags & SDHCI_PV_ENABLED) &&
3776 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3777 spin_lock_irqsave(&host->lock, flags);
3778 sdhci_enable_preset_value(host, true);
3779 spin_unlock_irqrestore(&host->lock, flags);
3782 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3783 mmc->ops->hs400_enhanced_strobe)
3784 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3787 spin_lock_irqsave(&host->lock, flags);
3789 host->runtime_suspended = false;
3791 /* Enable SDIO IRQ */
3792 if (sdio_irq_claimed(mmc))
3793 sdhci_enable_sdio_irq_nolock(host, true);
3795 /* Enable Card Detection */
3796 sdhci_enable_card_detection(host);
3798 spin_unlock_irqrestore(&host->lock, flags);
3800 return 0;
3801 }
3802 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3804 #endif /* CONFIG_PM */
3806 /*****************************************************************************\
3808 * Command Queue Engine (CQE) helpers *
3810 \*****************************************************************************/
3812 void sdhci_cqe_enable(struct mmc_host *mmc)
3814 struct sdhci_host *host = mmc_priv(mmc);
3815 unsigned long flags;
3818 spin_lock_irqsave(&host->lock, flags);
3820 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3821 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3823 * Host from V4.10 supports ADMA3 DMA type.
3824 * ADMA3 performs integrated descriptor which is more suitable
3825 * for cmd queuing to fetch both command and transfer descriptors.
3827 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3828 ctrl |= SDHCI_CTRL_ADMA3;
3829 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3830 ctrl |= SDHCI_CTRL_ADMA64;
3831 else
3832 ctrl |= SDHCI_CTRL_ADMA32;
3833 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3835 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3836 SDHCI_BLOCK_SIZE);
3838 /* Set maximum timeout */
3839 sdhci_set_timeout(host, NULL);
3841 host->ier = host->cqe_ier;
3843 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3844 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3846 host->cqe_on = true;
3848 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3849 mmc_hostname(mmc), host->ier,
3850 sdhci_readl(host, SDHCI_INT_STATUS));
3852 spin_unlock_irqrestore(&host->lock, flags);
3854 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3856 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3858 struct sdhci_host *host = mmc_priv(mmc);
3859 unsigned long flags;
3861 spin_lock_irqsave(&host->lock, flags);
3863 sdhci_set_default_irqs(host);
3865 host->cqe_on = false;
3867 if (recovery) {
3868 sdhci_do_reset(host, SDHCI_RESET_CMD);
3869 sdhci_do_reset(host, SDHCI_RESET_DATA);
3870 }
3872 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3873 mmc_hostname(mmc), host->ier,
3874 sdhci_readl(host, SDHCI_INT_STATUS));
3876 spin_unlock_irqrestore(&host->lock, flags);
3878 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3880 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3888 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3889 *cmd_error = -EILSEQ;
3890 else if (intmask & SDHCI_INT_TIMEOUT)
3891 *cmd_error = -ETIMEDOUT;
3892 else
3893 *cmd_error = 0;
3895 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3896 *data_error = -EILSEQ;
3897 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3898 *data_error = -ETIMEDOUT;
3899 else if (intmask & SDHCI_INT_ADMA_ERROR)
3900 *data_error = -EIO;
3901 else
3902 *data_error = 0;
3904 /* Clear selected interrupts. */
3905 mask = intmask & host->cqe_ier;
3906 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3908 if (intmask & SDHCI_INT_BUS_POWER)
3909 pr_err("%s: Card is consuming too much power!\n",
3910 mmc_hostname(host->mmc));
3912 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3913 if (intmask) {
3914 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3915 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3916 mmc_hostname(host->mmc), intmask);
3917 sdhci_dumpregs(host);
3918 }
3920 return true;
3921 }
3922 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3924 /*****************************************************************************\
3926 * Device allocation/registration *
3928 \*****************************************************************************/
3930 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3931 size_t priv_size)
3932 {
3933 struct mmc_host *mmc;
3934 struct sdhci_host *host;
3936 WARN_ON(dev == NULL);
3938 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3939 if (!mmc)
3940 return ERR_PTR(-ENOMEM);
3942 host = mmc_priv(mmc);
3944 host->mmc_host_ops = sdhci_ops;
3945 mmc->ops = &host->mmc_host_ops;
3947 host->flags = SDHCI_SIGNALING_330;
3949 host->cqe_ier = SDHCI_CQE_INT_MASK;
3950 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3952 host->tuning_delay = -1;
3953 host->tuning_loop_count = MAX_TUNING_LOOP;
3955 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3958 * The DMA table descriptor count is calculated as the maximum
3959 * number of segments times 2, to allow for an alignment
3960 * descriptor for each segment, plus 1 for a nop end descriptor.
3962 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
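/* For example, with SDHCI_MAX_SEGS (128) this allocates 128 * 2 + 1 = 257 descriptors. */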
3963 host->max_adma = 65536;
3965 return host;
3966 }
3968 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3970 static int sdhci_set_dma_mask(struct sdhci_host *host)
3972 struct mmc_host *mmc = host->mmc;
3973 struct device *dev = mmc_dev(mmc);
3974 int ret;
3976 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3977 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3979 /* Try 64-bit mask if hardware is capable of it */
3980 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3981 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3982 if (ret) {
3983 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3984 mmc_hostname(mmc));
3985 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3986 }
3987 }
3989 /* 32-bit mask as default & fallback */
3990 if (ret) {
3991 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3992 if (ret)
3993 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3994 mmc_hostname(mmc));
3995 }
3997 return ret;
3998 }
4000 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4001 const u32 *caps, const u32 *caps1)
4003 u16 v;
4004 u64 dt_caps_mask = 0;
4005 u64 dt_caps = 0;
4007 if (host->read_caps)
4008 return;
4010 host->read_caps = true;
4012 if (debug_quirks)
4013 host->quirks = debug_quirks;
4015 if (debug_quirks2)
4016 host->quirks2 = debug_quirks2;
4018 sdhci_do_reset(host, SDHCI_RESET_ALL);
4020 if (host->v4_mode)
4021 sdhci_do_enable_v4_mode(host);
4023 device_property_read_u64_array(mmc_dev(host->mmc),
4024 "sdhci-caps-mask", &dt_caps_mask, 1);
4025 device_property_read_u64_array(mmc_dev(host->mmc),
4026 "sdhci-caps", &dt_caps, 1);
4028 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4029 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4031 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4032 return;
4034 if (caps) {
4035 host->caps = *caps;
4036 } else {
4037 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4038 host->caps &= ~lower_32_bits(dt_caps_mask);
4039 host->caps |= lower_32_bits(dt_caps);
4040 }
4042 if (host->version < SDHCI_SPEC_300)
4043 return;
4045 if (caps1) {
4046 host->caps1 = *caps1;
4047 } else {
4048 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4049 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4050 host->caps1 |= upper_32_bits(dt_caps);
4051 }
4052 }
4053 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
4055 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4057 struct mmc_host *mmc = host->mmc;
4058 unsigned int max_blocks;
4059 unsigned int bounce_size;
4060 int ret;
4062 /*
4063 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4064 * has diminishing returns; this is probably because SD/MMC
4065 * cards are usually optimized to handle this size of requests.
4066 */
4067 bounce_size = SZ_64K;
4069 * Adjust downwards to maximum request size if this is less
4070 * than our segment size, else hammer down the maximum
4071 * request size to the maximum buffer size.
4073 if (mmc->max_req_size < bounce_size)
4074 bounce_size = mmc->max_req_size;
4075 max_blocks = bounce_size / 512;
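/* e.g. the default 64 KiB bounce buffer gives 65536 / 512 = 128 blocks. */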
4077 /*
4078 * When we support just one segment, we can get significant
4079 * speedups with the help of a bounce buffer to group scattered
4080 * reads/writes together.
4081 */
4082 host->bounce_buffer = devm_kmalloc(mmc->parent,
4083 bounce_size,
4084 GFP_KERNEL);
4085 if (!host->bounce_buffer) {
4086 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4087 mmc_hostname(mmc), bounce_size);
4089 /*
4090 * Exiting with zero here makes sure we proceed with
4091 * mmc->max_segs == 1.
4092 */
4093 return;
4094 }
4096 host->bounce_addr = dma_map_single(mmc->parent,
4097 host->bounce_buffer,
4098 bounce_size,
4099 DMA_BIDIRECTIONAL);
4100 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
4101 if (ret)
4102 /* Again fall back to max_segs == 1 */
4103 return;
4104 host->bounce_buffer_size = bounce_size;
4106 /* Lie about this since we're bouncing */
4107 mmc->max_segs = max_blocks;
4108 mmc->max_seg_size = bounce_size;
4109 mmc->max_req_size = bounce_size;
4111 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4112 mmc_hostname(mmc), max_blocks, bounce_size);
4115 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4116 {
4117 /*
4118 * According to SD Host Controller spec v4.10, bit[27] added from
4119 * version 4.10 in the Capabilities register is used as 64-bit System
4120 * Address support for V4 mode.
4121 */
4122 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4123 return host->caps & SDHCI_CAN_64BIT_V4;
4125 return host->caps & SDHCI_CAN_64BIT;
4128 int sdhci_setup_host(struct sdhci_host *host)
4130 struct mmc_host *mmc;
4131 u32 max_current_caps;
4132 unsigned int ocr_avail;
4133 unsigned int override_timeout_clk;
4134 u32 max_clk;
4135 int ret;
4136 bool enable_vqmmc = false;
4138 WARN_ON(host == NULL);
4139 if (host == NULL)
4140 return -EINVAL;
4142 mmc = host->mmc;
4144 /*
4145 * If there are external regulators, get them. Note this must be done
4146 * early before resetting the host and reading the capabilities so that
4147 * the host can take the appropriate action if regulators are not
4148 * available.
4149 */
4150 if (!mmc->supply.vqmmc) {
4151 ret = mmc_regulator_get_supply(mmc);
4152 if (ret)
4153 return ret;
4154 enable_vqmmc = true;
4155 }
4157 DBG("Version: 0x%08x | Present: 0x%08x\n",
4158 sdhci_readw(host, SDHCI_HOST_VERSION),
4159 sdhci_readl(host, SDHCI_PRESENT_STATE));
4160 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4161 sdhci_readl(host, SDHCI_CAPABILITIES),
4162 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4164 sdhci_read_caps(host);
4166 override_timeout_clk = host->timeout_clk;
4168 if (host->version > SDHCI_SPEC_420) {
4169 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4170 mmc_hostname(mmc), host->version);
4173 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4174 host->flags |= SDHCI_USE_SDMA;
4175 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4176 DBG("Controller doesn't have SDMA capability\n");
4177 else
4178 host->flags |= SDHCI_USE_SDMA;
4180 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4181 (host->flags & SDHCI_USE_SDMA)) {
4182 DBG("Disabling DMA as it is marked broken\n");
4183 host->flags &= ~SDHCI_USE_SDMA;
4186 if ((host->version >= SDHCI_SPEC_200) &&
4187 (host->caps & SDHCI_CAN_DO_ADMA2))
4188 host->flags |= SDHCI_USE_ADMA;
4190 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4191 (host->flags & SDHCI_USE_ADMA)) {
4192 DBG("Disabling ADMA as it is marked broken\n");
4193 host->flags &= ~SDHCI_USE_ADMA;
4196 if (sdhci_can_64bit_dma(host))
4197 host->flags |= SDHCI_USE_64_BIT_DMA;
4199 if (host->use_external_dma) {
4200 ret = sdhci_external_dma_init(host);
4201 if (ret == -EPROBE_DEFER)
4204 * Fall back to use the DMA/PIO integrated in standard SDHCI
4205 * instead of external DMA devices.
4208 sdhci_switch_external_dma(host, false);
4209 /* Disable internal DMA sources */
4211 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4214 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4215 if (host->ops->set_dma_mask)
4216 ret = host->ops->set_dma_mask(host);
4218 ret = sdhci_set_dma_mask(host);
4220 if (!ret && host->ops->enable_dma)
4221 ret = host->ops->enable_dma(host);
4223 if (ret) {
4224 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4225 mmc_hostname(mmc));
4226 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4228 ret = 0;
4229 }
4230 }
4232 /* SDMA does not support 64-bit DMA if v4 mode not set */
4233 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4234 host->flags &= ~SDHCI_USE_SDMA;
4236 if (host->flags & SDHCI_USE_ADMA) {
4237 dma_addr_t dma;
4238 void *buf;
4240 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4241 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4242 else if (!host->alloc_desc_sz)
4243 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4245 host->desc_sz = host->alloc_desc_sz;
4246 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4248 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4250 * Use zalloc to zero the reserved high 32-bits of 128-bit
4251 * descriptors so that they never need to be written.
4253 buf = dma_alloc_coherent(mmc_dev(mmc),
4254 host->align_buffer_sz + host->adma_table_sz,
4255 &dma, GFP_KERNEL);
4256 if (!buf) {
4257 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4258 mmc_hostname(mmc));
4259 host->flags &= ~SDHCI_USE_ADMA;
4260 } else if ((dma + host->align_buffer_sz) &
4261 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4262 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4263 mmc_hostname(mmc));
4264 host->flags &= ~SDHCI_USE_ADMA;
4265 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4266 host->adma_table_sz, buf, dma);
4268 host->align_buffer = buf;
4269 host->align_addr = dma;
4271 host->adma_table = buf + host->align_buffer_sz;
4272 host->adma_addr = dma + host->align_buffer_sz;
4277 * If we use DMA, then it's up to the caller to set the DMA
4278 * mask, but PIO does not need the hw shim so we set a new
4279 * mask here in that case.
4281 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4282 host->dma_mask = DMA_BIT_MASK(64);
4283 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4286 if (host->version >= SDHCI_SPEC_300)
4287 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4288 else
4289 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4291 host->max_clk *= 1000000;
4292 if (host->max_clk == 0 || host->quirks &
4293 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4294 if (!host->ops->get_max_clock) {
4295 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4296 mmc_hostname(mmc));
4297 ret = -ENODEV;
4298 goto undma;
4299 }
4300 host->max_clk = host->ops->get_max_clock(host);
4301 }
4304 * In case of Host Controller v3.00, find out whether clock
4305 * multiplier is supported.
4307 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4310 * In case the value in Clock Multiplier is 0, then programmable
4311 * clock mode is not supported, otherwise the actual clock
4312 * multiplier is one more than the value of Clock Multiplier
4313 * in the Capabilities Register.
4314 */
4315 if (host->clk_mul)
4316 host->clk_mul += 1;
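/* e.g. a Clock Multiplier field of 9 means the base clock can be multiplied by 10 in Programmable Clock Mode. */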
4318 /*
4319 * Set host parameters.
4320 */
4321 max_clk = host->max_clk;
4323 if (host->ops->get_min_clock)
4324 mmc->f_min = host->ops->get_min_clock(host);
4325 else if (host->version >= SDHCI_SPEC_300) {
4326 if (host->clk_mul)
4327 max_clk = host->max_clk * host->clk_mul;
4329 * Divided Clock Mode minimum clock rate is always less than
4330 * Programmable Clock Mode minimum clock rate.
4332 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4333 } else
4334 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4336 if (!mmc->f_max || mmc->f_max > max_clk)
4337 mmc->f_max = max_clk;
4339 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4340 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4342 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4343 host->timeout_clk *= 1000;
4345 if (host->timeout_clk == 0) {
4346 if (!host->ops->get_timeout_clock) {
4347 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4348 mmc_hostname(mmc));
4349 ret = -ENODEV;
4350 goto undma;
4351 }
4353 host->timeout_clk =
4354 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4355 1000);
4356 }
4358 if (override_timeout_clk)
4359 host->timeout_clk = override_timeout_clk;
4361 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4362 host->ops->get_max_timeout_count(host) : 1 << 27;
4363 mmc->max_busy_timeout /= host->timeout_clk;
4366 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4367 !host->ops->get_max_timeout_count)
4368 mmc->max_busy_timeout = 0;
4370 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4371 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4373 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4374 host->flags |= SDHCI_AUTO_CMD12;
4377 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4378 * For v4 mode, SDMA may use Auto-CMD23 as well.
4380 if ((host->version >= SDHCI_SPEC_300) &&
4381 ((host->flags & SDHCI_USE_ADMA) ||
4382 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4383 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4384 host->flags |= SDHCI_AUTO_CMD23;
4385 DBG("Auto-CMD23 available\n");
4387 DBG("Auto-CMD23 unavailable\n");
4391 * A controller may support 8-bit width, but the board itself
4392 * might not have the pins brought out. Boards that support
4393 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4394 * their platform code before calling sdhci_add_host(), and we
4395 * won't assume 8-bit width for hosts without that CAP.
4397 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4398 mmc->caps |= MMC_CAP_4_BIT_DATA;
4400 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4401 mmc->caps &= ~MMC_CAP_CMD23;
4403 if (host->caps & SDHCI_CAN_DO_HISPD)
4404 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4406 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4407 mmc_card_is_removable(mmc) &&
4408 mmc_gpio_get_cd(host->mmc) < 0)
4409 mmc->caps |= MMC_CAP_NEEDS_POLL;
4411 if (!IS_ERR(mmc->supply.vqmmc)) {
4412 if (enable_vqmmc) {
4413 ret = regulator_enable(mmc->supply.vqmmc);
4414 host->sdhci_core_to_disable_vqmmc = !ret;
4415 }
4417 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4418 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4419 1950000))
4420 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4421 SDHCI_SUPPORT_SDR50 |
4422 SDHCI_SUPPORT_DDR50);
4424 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4425 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4426 3600000))
4427 host->flags &= ~SDHCI_SIGNALING_330;
4429 if (ret) {
4430 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4431 mmc_hostname(mmc), ret);
4432 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4433 }
4434 }
4437 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4438 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4439 SDHCI_SUPPORT_DDR50);
4441 * The SDHCI controller in a SoC might support HS200/HS400
4442 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4443 * but if the board is modeled such that the IO lines are not
4444 * connected to 1.8v then HS200/HS400 cannot be supported.
4445 * Disable HS200/HS400 if the board does not have 1.8v connected
4446 * to the IO lines. (Applicable for other modes in 1.8v)
4448 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4449 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4452 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4453 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4454 SDHCI_SUPPORT_DDR50))
4455 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4457 /* SDR104 support also implies SDR50 support */
4458 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4459 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4460 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4461 * field can be promoted to support HS200.
4462 */
4463 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4464 mmc->caps2 |= MMC_CAP2_HS200;
4465 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4466 mmc->caps |= MMC_CAP_UHS_SDR50;
4469 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4470 (host->caps1 & SDHCI_SUPPORT_HS400))
4471 mmc->caps2 |= MMC_CAP2_HS400;
4473 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4474 (IS_ERR(mmc->supply.vqmmc) ||
4475 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4476 1300000)))
4477 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4479 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4480 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4481 mmc->caps |= MMC_CAP_UHS_DDR50;
4483 /* Does the host need tuning for SDR50? */
4484 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4485 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4487 /* Driver Type(s) (A, C, D) supported by the host */
4488 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4489 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4490 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4491 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4492 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4493 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4495 /* Initial value for re-tuning timer count */
4496 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4497 host->caps1);
4499 /*
4500 * In case Re-tuning Timer is not disabled, the actual value of
4501 * re-tuning timer will be 2 ^ (n - 1).
4502 */
4503 if (host->tuning_count)
4504 host->tuning_count = 1 << (host->tuning_count - 1);
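/* e.g. a register field of 11 yields a 1 << 10 = 1024 second re-tuning period. */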
4506 /* Re-tuning mode supported by the Host Controller */
4507 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4509 ocr_avail = 0;
4511 /*
4512 * According to SD Host Controller spec v3.00, if the Host System
4513 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4514 * the value is meaningful only if Voltage Support in the Capabilities
4515 * register is set. The actual current value is 4 times the register
4516 * value.
4517 */
4518 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4519 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4520 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4521 if (curr > 0) {
4523 /* convert to SDHCI_MAX_CURRENT format */
4524 curr = curr/1000; /* convert to mA */
4525 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4527 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4528 max_current_caps =
4529 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4530 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4531 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4532 }
4533 }
4535 if (host->caps & SDHCI_CAN_VDD_330) {
4536 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4538 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4539 max_current_caps) *
4540 SDHCI_MAX_CURRENT_MULTIPLIER;
4541 }
4542 if (host->caps & SDHCI_CAN_VDD_300) {
4543 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4545 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4546 max_current_caps) *
4547 SDHCI_MAX_CURRENT_MULTIPLIER;
4548 }
4549 if (host->caps & SDHCI_CAN_VDD_180) {
4550 ocr_avail |= MMC_VDD_165_195;
4552 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4553 max_current_caps) *
4554 SDHCI_MAX_CURRENT_MULTIPLIER;
4555 }
4557 /* If OCR set by host, use it instead. */
4558 if (host->ocr_mask)
4559 ocr_avail = host->ocr_mask;
4561 /* If OCR set by external regulators, give it highest prio. */
4562 if (mmc->ocr_avail)
4563 ocr_avail = mmc->ocr_avail;
4565 mmc->ocr_avail = ocr_avail;
4566 mmc->ocr_avail_sdio = ocr_avail;
4567 if (host->ocr_avail_sdio)
4568 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4569 mmc->ocr_avail_sd = ocr_avail;
4570 if (host->ocr_avail_sd)
4571 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4572 else /* normal SD controllers don't support 1.8V */
4573 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4574 mmc->ocr_avail_mmc = ocr_avail;
4575 if (host->ocr_avail_mmc)
4576 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4578 if (mmc->ocr_avail == 0) {
4579 pr_err("%s: Hardware doesn't report any supported voltages.\n",
4580 mmc_hostname(mmc));
4581 ret = -ENODEV;
4582 goto unreg;
4583 }
4585 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4586 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4587 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4588 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4589 host->flags |= SDHCI_SIGNALING_180;
4591 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4592 host->flags |= SDHCI_SIGNALING_120;
4594 spin_lock_init(&host->lock);
4596 /*
4597 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4598 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4599 * is less anyway.
4600 */
4601 mmc->max_req_size = 524288;
4604 * Maximum number of segments. Depends on if the hardware
4605 * can do scatter/gather or not.
4607 if (host->flags & SDHCI_USE_ADMA) {
4608 mmc->max_segs = SDHCI_MAX_SEGS;
4609 } else if (host->flags & SDHCI_USE_SDMA) {
4610 mmc->max_segs = 1;
4611 if (swiotlb_max_segment()) {
4612 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4613 IO_TLB_SEGSIZE;
4614 mmc->max_req_size = min(mmc->max_req_size,
4615 max_req_size);
4616 }
4617 } else { /* PIO */
4618 mmc->max_segs = SDHCI_MAX_SEGS;
4619 }
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
			host->max_adma = 65532; /* 32-bit alignment */
			mmc->max_seg_size = 65535;
		} else {
			mmc->max_seg_size = 65536;
		}
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
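
	/*
	 * The capabilities field encodes the block size as a power-of-two
	 * shift of 512 bytes: 0 -> 512, 1 -> 1024, 2 -> 2048; 3 is
	 * reserved, hence the fall-back to 512 above.
	 */
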
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

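	/*
	 * When only one segment is available (SDMA or PIO), a bounce buffer
	 * lets the core still submit multi-segment requests: data is copied
	 * through one contiguous buffer, which may shrink the max_req_size
	 * and max_blk_* limits set above.
	 */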
	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
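
/*
 * Typical call sequence from a glue driver's probe path (illustrative
 * sketch only; error handling and any fix-ups between the two stages
 * are the caller's own):
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	... fix up mmc->caps, clocks, DMA settings as needed ...
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *
 * Drivers with nothing to fix up in between can simply call
 * sdhci_add_host(), which runs both stages and cleans up on failure.
 */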
int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	if ((mmc->caps2 & MMC_CAP2_CQE) &&
	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
		mmc->caps2 &= ~MMC_CAP2_CQE;
		mmc->cqe_ops = NULL;
	}

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		host->use_external_dma ? "External DMA" :
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
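
/*
 * A note on the 'dead' argument below: pass dead = 1 when the controller
 * is already unreachable (e.g. a surprise removal), in which case any
 * requests still in flight are failed with -ENOMEDIUM and the full
 * controller reset is skipped; pass dead = 0 for an orderly teardown.
 */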
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");