// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_MCI_GENERICS		0x70
#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)

#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_BUS_FAIL	BIT(1)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define CORE_PWRCTL_IO_FAIL	BIT(3)
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf
#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)

#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define DLL_USR_CTL_POR_VAL	0x10800
#define ENABLE_DLL_LOCK_STATUS	BIT(26)
#define FINE_TUNE_MODE_EN	BIT(27)
#define BIAS_OK_SIGNAL		BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL	0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL	0x10

#define CORE_VENDOR_SPEC_POR_VAL	0xa9c
#define CORE_CLK_PWRSAVE	BIT(1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN	BIT(15)
#define CORE_IO_PAD_PWR_SWITCH	BIT(16)
#define CORE_HC_SELECT_IN_EN	BIT(18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_3_0V_SUPPORT	BIT(25)
#define CORE_1_8V_SUPPORT	BIT(26)
#define CORE_VOLT_SUPPORT	(CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)

#define CORE_PWRSAVE_DLL		BIT(3)

#define DDR_CONFIG_POR_VAL		0x80040873

#define INVALID_TUNING_PHASE		-1
#define SDHCI_MSM_MIN_CLOCK		400000
#define CORE_FREQ_100MHZ		(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT		20
#define CDR_SELEXT_MASK			(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT		24
#define CMUX_SHIFT_PHASE_MASK		(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS		5000

/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA		325000

#define msm_host_readl(msm_host, host, offset) \
	msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
	msm_host->var_ops->msm_writel_relaxed(val, host, offset)

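/*
 * Illustrative note (not part of the original source): with the v5
 * variant ops installed, msm_host_readl(msm_host, host, 0x240)
 * dispatches to sdhci_msm_v5_variant_readl_relaxed() and becomes a
 * plain readl_relaxed(host->ioaddr + 0x240), while on pre-v5 parts the
 * same call goes through the MCI variant and reads from
 * msm_host->core_mem instead. This is how the two register layouts are
 * hidden behind a single accessor.
 */
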
/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1	0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN	(0x3 << 13)

struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
	.core_dll_usr_ctl = 0x388,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};

/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data_structure based on version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	bool uses_tassadar_dll;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
	unsigned long clk_rate;
	struct mmc_host *mmc;
	struct opp_table *opp_table;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;
	u32 transfer_mode;
	bool updated_ddr_cfg;
	bool uses_tassadar_dll;
	u32 dll_config;
	u32 ddr_config;
	bool vqmmc_enabled;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
						    unsigned int clock)
{
	struct mmc_ios ios = host->mmc->ios;

	/*
	 * The SDHC requires internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400 MHz) for some of its parts and
	 * sends the actual required clock (50/200 MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		clock *= 2;
	return clock;
}

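/*
 * Worked example (illustrative, not part of the driver): for
 * MMC_TIMING_MMC_DDR52 with a requested card clock of 52 MHz,
 * msm_get_clock_rate_for_bus_mode() returns 104 MHz, so GCC runs the
 * core clock at twice the card rate while the controller derives the
 * 52 MHz card clock from it.
 */
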
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	int rc;

	clock = msm_get_clock_rate_for_bus_mode(host, clock);
	rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), clock,
		       curr_ios.timing);
		return;
	}
	msm_host->clk_rate = clock;
	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), clk_get_rate(core_clk),
		 curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Poll for CK_OUT_EN bit, max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config) & CORE_CK_OUT_EN);
	}

	return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}

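/*
 * Worked example (illustrative, not part of the driver): the CDR_SELEXT
 * field takes the phase index gray-coded, so stepping through adjacent
 * phases flips only one bit of the field at a time. Requesting phase 4
 * writes grey_coded_phase_table[4] = 0x6 into bits [23:20] of
 * DLL_CONFIG.
 */
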
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window then the
			 * total number of phases in both the windows should
			 * not be more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}

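/*
 * Worked example (illustrative, not part of the driver): if tuning
 * passed for the consecutive phases 6..12, the largest window has
 * curr_max = 7 entries, so i = (7 * 3) / 4 = 5, which is decremented
 * to 4 and phase ranges[row][4] = 10 is returned, i.e. a phase roughly
 * 3/4 of the way into the window, biased away from the window edges.
 */
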
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}

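/*
 * Illustrative mapping derived from the thresholds above (not part of
 * the driver): the 3-bit MCLK_FREQ code grows with host->clock, e.g.
 * 100 MHz falls in the <= 112 MHz bucket (code 0), 150 MHz programs
 * code 3 and 200 MHz programs code 7 into the CMUX_SHIFT_PHASE field,
 * bits [26:24] of DLL_CONFIG.
 */
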
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	if (msm_host->dll_config)
		writel_relaxed(msm_host->dll_config,
				host->ioaddr + msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Configure DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config_3);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
			       mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

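/*
 * Worked example for the FLL cycle-count math above (illustrative, not
 * part of the driver): with a 19.2 MHz TCXO and host->clock = 192 MHz,
 * taking the 8-cycle branch gives
 * mclk_freq = DIV_ROUND_CLOSEST_ULL(192000000 * 8, 19200000) = 80,
 * which is then programmed into bits [17:10] of DLL_CONFIG_2.
 */
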
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
				msm_offset->core_dll_status,
				dll_lock,
				(dll_lock &
				(CORE_DLL_LOCK |
				CORE_DDR_DLL_LOCK)), 10,
				1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (Since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *		Initially SDR104 - when tuning is required as HS200
 *		Then when switching to DDR @ 400MHz (HS400) we use
 *		the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * The core_ddr_config register currently defaults to the desired
	 * configuration on reset. Reprogram the power on reset (POR) value
	 * in case it has been modified by a bootloader. In the future, if
	 * this changes, then the desired values will need to be programmed
	 * appropriately.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at least 1us before DATA
	 * starts coming. Controllers with 14lpp and later tech DLL cannot
	 * guarantee the above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Drain writebuffer to ensure above DLL calibration
	 * and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios->timing == MMC_TIMING_MMC_HS400 ||
	      ios->timing == MMC_TIMING_MMC_HS200 ||
	      ios->timing == MMC_TIMING_UHS_SDR104) ||
	    ios->enhanced_strobe)
		return false;

	return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/*
	 * SDR DLL comes into picture only for timing modes which need
	 * tuning.
	 */
	if (!sdhci_msm_is_tuning_needed(host))
		return 0;

	/* Reset the tuning block */
	ret = msm_init_cm_dll(host);
	if (ret)
		return ret;

	/* Restore the tuning block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
	u32 config, oldconfig = readl_relaxed(host->ioaddr +
					      msm_offset->core_dll_config);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig) {
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_dll_config);
	}
}

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 10;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	/*
	 * HS400 tuning in HS200 timing requires:
	 * - select MCLK/2 in VENDOR_SPEC
	 * - program MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
			/*
			 * All phases valid is _almost_ as bad as no phases
			 * valid. Probably all phases are not really reliable
			 * but we didn't detect where the unreliable place is.
			 * That means we'll essentially be guessing and hoping
			 * we get a good phase. Better to try a few times.
			 */
			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
				mmc_hostname(mmc));
			if (--tuning_seq_cnt) {
				tuned_phase_cnt = 0;
				goto retry;
			}
		}

		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz
		 * Thus, make sure it is disabled when not required
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
{
	struct platform_device *pdev = msm_host->pdev;
	int ret;

	if (level)
		ret = pinctrl_pm_select_default_state(&pdev->dev);
	else
		ret = pinctrl_pm_select_sleep_state(&pdev->dev);

	return ret;
}

static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
{
	if (IS_ERR(mmc->supply.vmmc))
		return 0;

	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}

static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
			    struct mmc_host *mmc, bool level)
{
	int ret;
	struct mmc_ios ios;

	if (msm_host->vqmmc_enabled == level)
		return 0;

	if (level) {
		/* Set the IO voltage regulator to default voltage level */
		if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
		else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;

		if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
			ret = mmc_regulator_set_vqmmc(mmc, &ios);
			if (ret < 0) {
				dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n",
					mmc_hostname(mmc), ret);
				goto out;
			}
		}
		ret = regulator_enable(mmc->supply.vqmmc);
	} else {
		ret = regulator_disable(mmc->supply.vqmmc);
	}

	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n",
			mmc_hostname(mmc), level ? "en" : "dis", ret);
	else
		msm_host->vqmmc_enabled = level;
out:
	return ret;
}

static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
				 struct mmc_host *mmc, bool hpm)
{
	int load, ret;

	load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
	ret = regulator_set_load(mmc->supply.vqmmc, load);
	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
			mmc_hostname(mmc), ret);
	return ret;
}

static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
			       struct mmc_host *mmc, bool level)
{
	int ret;
	bool always_on;

	if (IS_ERR(mmc->supply.vqmmc) ||
	    (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
		return 0;
	/*
	 * For eMMC don't turn off Vqmmc, instead just configure it in LPM
	 * and HPM modes by setting the corresponding load.
	 *
	 * Till eMMC is initialized (i.e. always_on == 0), just turn on/off
	 * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off
	 * gets invoked. Once eMMC is initialized (i.e. always_on == 1),
	 * Vqmmc should remain ON, so just set the load instead of turning it
	 * on/off.
	 */
	always_on = !mmc_card_is_removable(mmc) &&
		    mmc->card && mmc_card_mmc(mmc->card);

	if (always_on)
		ret = msm_config_vqmmc_mode(msm_host, mmc, level);
	else
		ret = msm_toggle_vqmmc(msm_host, mmc, level);

	return ret;
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status API should be called when register writes
 * that can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
 * happen. The state to which the register writes will change the IO lines
 * should be passed as the req_type argument. This API will check whether
 * the IO line's state is already the expected state and will wait for the
 * power irq only if a power irq is expected to be triggered based on the
 * current IO line state and the expected IO line state.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	u32 irq_status, irq_ack = 0;
	int retry = 10, ret;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
					mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}

	if (pwr_state) {
		ret = sdhci_msm_set_vmmc(mmc);
		if (!ret)
			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_pincfg(msm_host,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
		else
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
	}

	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW)
		io_level = REQ_IO_LOW;

	if (irq_status & CORE_PWRCTL_IO_HIGH)
		io_level = REQ_IO_HIGH;

	if (io_level)
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
		ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
				mmc_hostname(mmc), ret,
				mmc->ios.signal_voltage, mmc->ios.vdd,
				irq_status);
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		}
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back if it succeeded or not to this register. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset IO PAD PWR switch only if the register write
		 * can set IO lines high and the regulator also switches to 3 V.
		 * Else, we should keep the IO PAD PWR switch set.
		 * This is applicable to certain targets where eMMC vccq supply
		 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
		 * IO PAD PWR switch must be kept set to reflect actual
		 * regulator voltage. This way, during initialization of
		 * controllers with only 1.8V, we will set the IO PAD bit
		 * without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
				(msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
				(msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/*
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use an internal divider and
 * instead directly controls the GCC clock as per
 * HW recommendation.
 **/
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	/*
	 * Keep actual_clock as zero -
	 * - since there is no divider used so no need of having actual_clock.
	 * - MSM controller uses SDCLK for data timeout calculation. If
	 *   actual_clock is zero, host->clock is taken for calculation.
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider.
	 * Thus read SDHCI_CLOCK_CONTROL and only enable the
	 * clock with no divider value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		msm_host->clk_rate = clock;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}

/*****************************************************************************\
 *                                                                           *
 * MSM Command Queue Engine (CQE)                                            *
 *                                                                           *
\*****************************************************************************/

static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
	return 0;
}

static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * When CQE is halted, the legacy SDHCI path operates only
	 * on 16-byte descriptors in 64bit mode.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * During CQE command transfers, command complete bit gets latched.
	 * So s/w should clear command complete interrupt status when CQE is
	 * either halted or disabled. Otherwise an unexpected SDHCI legacy
	 * interrupt gets triggered when CQE is halted/disabled.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}

static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 count, start = 15;

	__sdhci_set_timeout(host, cmd);
	count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
	/*
	 * Update the software timeout value if its value is less than the
	 * hardware data timeout value. The Qcom SoC hardware data timeout
	 * value was calculated using 4 * MCLK * 2^(count + 13),
	 * where MCLK = 1 / host->clock.
	 */
	if (cmd && cmd->data && host->clock > 400000 &&
	    host->clock <= 50000000 &&
	    ((1 << (count + start)) > (10 * host->clock)))
		host->data_timeout = 22LL * NSEC_PER_SEC;
}

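/*
 * Worked example (illustrative, not part of the driver): at
 * host->clock = 50 MHz a TIMEOUT_CONTROL count of 14 makes the hardware
 * window 1 << (14 + 15) = 2^29 cycles (equivalently 4 * 2^(14 + 13)),
 * i.e. ~10.7 s at 50 MHz, which exceeds the 10 * host->clock cycle
 * (10 s) software timeout, so data_timeout is bumped to 22 s to keep
 * the software timer from firing before the hardware one.
 */
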
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable		= sdhci_cqe_enable,
	.disable	= sdhci_msm_cqe_disable,
};

static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				  struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * When CQE is halted, SDHC operates only on 16-byte ADMA descriptors.
	 * So ensure ADMA table is allocated for 16-byte descriptors.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
			mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Disable cqe reset due to cqe enable signal */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * SDHC expects 12-byte ADMA descriptors till CQE is enabled.
	 * So limit desc_sz to 12 so that the data commands that are sent
	 * during card initialization (before CQE gets enabled) would
	 * get executed without any issues.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
		 mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

/*
 * Platform specific register write functions. This is so that, if any
 * register write needs to be followed up by platform specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions rely on sdhci_set_ios not using a spinlock.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!(caps & (CORE_1_8V_SUPPORT | CORE_3_0V_SUPPORT)))
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
				mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
		 * bit can be used as required later on.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
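/*
 * The 1.7V-1.95V and 2.7V-3.6V windows probed above correspond to the
 * nominal 1.8V and 3.0V/3.3V vqmmc rails; each window the regulator can
 * satisfy sets the matching CORE_*_SUPPORT pad capability bit.
 */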
static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
{
	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
		cqhci_deactivate(host->mmc);
	sdhci_reset(host, mask);
}
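/*
 * A SDHCI_RESET_ALL wipes the CQE configuration, so CQE is marked
 * deactivated first; the cqhci layer is then expected to re-enable it on
 * the next request that needs it.
 */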
static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host)
{
	int ret;

	ret = mmc_regulator_get_supply(msm_host->mmc);
	if (ret)
		return ret;

	sdhci_msm_set_regulator_caps(msm_host);

	return 0;
}
static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc,
						 struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl, status;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		/* Enable 1.8V Signal Enable in the Host Control2 register */
		ctrl |= SDHCI_CTRL_VDD_180;
		break;
	default:
		return -EINVAL;
	}

	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/* Wait for 5ms */
	usleep_range(5000, 5500);

	/* regulator output should be stable within 5 ms */
	status = ctrl & SDHCI_CTRL_VDD_180;
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if ((ctrl & SDHCI_CTRL_VDD_180) == status)
		return 0;

	dev_warn(mmc_dev(mmc), "%s: Regulator output did not become stable\n",
		 mmc_hostname(mmc));

	return -EAGAIN;
}
#define DRIVER_NAME "sdhci_msm"
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
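/*
 * Each dump line is prefixed with the host and driver names, e.g.
 * "mmc0: sdhci_msm: ----------- VENDOR REGISTER DUMP -----------".
 */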
static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
		"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
		"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
		"Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			      msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}
static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sm8250_sdhci_var = {
	.mci_removed = true,
	.uses_tassadar_dll = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
	{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
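/*
 * For reference, a minimal illustrative devicetree node matched by this
 * table (all values are placeholders, not taken from a real board):
 *
 *	mmc@... {
 *		compatible = "qcom,sm8250-sdhci", "qcom,sdhci-msm-v5";
 *		reg = <...>;
 *		interrupts = <...>, <...>;
 *		interrupt-names = "hc_irq", "pwr_irq";
 *		clocks = <...>, <...>, <...>;
 *		clock-names = "iface", "core", "xo";
 *		supports-cqe;
 *	};
 *
 * "pwr_irq" and the clock names are the ones looked up in probe below;
 * qcom,ddr-config and qcom,dll-config are the optional tuning properties
 * parsed in sdhci_msm_get_of_property() below.
 */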
static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_msm_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq = sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.set_power = sdhci_set_power_noreg,
	.set_timeout = sdhci_msm_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};
static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
		struct sdhci_host *host)
{
	struct device_node *node = pdev->dev.of_node;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (of_property_read_u32(node, "qcom,ddr-config",
				 &msm_host->ddr_config))
		msm_host->ddr_config = DDR_CONFIG_POR_VAL;

	of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
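/*
 * of_property_read_u32() leaves its output untouched on failure, so a
 * missing "qcom,dll-config" simply keeps the zeroed default from host
 * allocation, while a missing "qcom,ddr-config" falls back explicitly
 * to the power-on-reset value.
 */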
static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
{
	struct reset_control *reset;
	int ret = 0;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset),
				"unable to acquire core_reset\n");

	if (!reset)
		return ret;

	ret = reset_control_assert(reset);
	if (ret) {
		reset_control_put(reset);
		return dev_err_probe(dev, ret, "core_reset assert failed\n");
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.768 kHz) cycles, which comes to
	 * ~122 us (4 / 32768 s). To be on the safe side, add a 200 us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(reset);
	if (ret) {
		reset_control_put(reset);
		return dev_err_probe(dev, ret, "core_reset deassert failed\n");
	}

	usleep_range(200, 210);
	reset_control_put(reset);

	return ret;
}
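/*
 * reset_control_get_optional_exclusive() returns NULL when the device
 * node specifies no reset, which is why the helper returns early instead
 * of treating an absent core reset as an error.
 */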
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;
	/*
	 * Based on the compatible string, load the required msm host info
	 * from the data associated with the version info.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;
	msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	ret = sdhci_msm_gcc_reset(&pdev->dev, host);
	if (ret)
		goto pltfm_free;
	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;
	/* Check for optional interconnect paths */
	ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
	if (ret)
		goto bus_clk_disable;

	msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
	if (IS_ERR(msm_host->opp_table)) {
		ret = PTR_ERR(msm_host->opp_table);
		goto bus_clk_disable;
	}

	/* OPP table is optional */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
		goto opp_put_clkname;
	}

	/* Vote for maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto opp_cleanup;
	/*
	 * The xo clock is needed for the FLL feature of the cm_dll.
	 * If the xo clock is not described in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}
	/* Reset the vendor spec register to its power-on-reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set HC_MODE_EN bit in HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
					msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}
	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
				      msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and
	 * later with HS400 mode support will use CM DLL instead of CDC LP
	 * 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;
	ret = sdhci_msm_register_vreg(msm_host);
	if (ret)
		goto clk_disable;

	/*
	 * The power-on-reset state may trigger a power irq if the previous
	 * PWRCTL status was either BUS_ON or IO_HIGH_V. So before enabling
	 * the pwr irq interrupt in the GIC, acknowledge any pending power
	 * irq; otherwise the power irq handler would fire prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before the interrupt
	 * is enabled in the GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
			msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}
	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.start_signal_voltage_switch =
		sdhci_msm_start_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
opp_cleanup:
	dev_pm_opp_of_remove_table(&pdev->dev);
opp_put_clkname:
	dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
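/*
 * The error labels above unwind in reverse order of acquisition: runtime
 * PM state, bulk clocks, OPP table, OPP clkname, bus voter clock and,
 * last, the platform host allocation.
 */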
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}
static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}
static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;

	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL
	 * settings must be restored when the clock is ungated again.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate)
		ret = sdhci_msm_restore_sdr_dll_config(host);

	dev_pm_opp_set_rate(dev, msm_host->clk_rate);

	return ret;
}
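/*
 * Resume mirrors suspend: the bulk clocks come back first, the SDR DLL
 * configuration is restored on variants that lose it across clock
 * gating, and the OPP vote dropped to 0 in suspend is raised back to the
 * last requested clock rate.
 */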
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
		   .probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");