// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS

#define MAX_SUPP_MAC 64
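/*
 * Note: MCQ_QUEUE_OFFSET() extracts bits [23:16] of the MCQ capability
 * register — which appears to correspond to the UFSHCI 4.0 QCFGPTR field,
 * expressed in 0x200-byte units — to locate the queue-configuration region.
 * It is consumed below in ufs_mtk_mcq_config_resource() to set hba->mcq_base.
 */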
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};
static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
		 * to prevent host hang issue
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}
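
/*
 * ufs_mtk_wait_idle_state() samples the vendor state machine through the
 * debug probe register until it leaves the Hibern8 enter/exit window and
 * settles back to the idle (VS_HCE_BASE) state, or until the retry budget
 * expires.
 */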
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
				    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * if state is in H8 enter and H8 enter confirm
		 * wait until return to idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int ret, volt;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
}
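
/*
 * Power-up and power-down are deliberately mirrored below: the MPHY is
 * powered first and the reference clock ungated on the way up, then torn
 * down in reverse order on the way down. Performance scaling is only
 * touched here when clk-scaling is unavailable, since the scaling path
 * calls ufs_mtk_scale_perf() itself (see ufs_mtk_clk_scale_notify()).
 */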
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;

	if (!is_mcq_enabled(hba))
		return;

	if (host->mcq_nr_intr == 0)
		return;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		disable_irq(irq);
	}
	host->is_mcq_intr_enabled = false;
}

static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;

	if (!is_mcq_enabled(hba))
		return;

	if (host->mcq_nr_intr == 0)
		return;

	if (host->is_mcq_intr_enabled == true)
		return;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		enable_irq(irq);
	}
	host->is_mcq_intr_enabled = true;
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Return: 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
		ufs_mtk_mcq_disable_irq(hba);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
		ufs_mtk_mcq_enable_irq(hba);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret;
	u32 ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from list to avoid
	 * being switched on/off in clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
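/*
 * Some platforms do not describe VCC directly in DT. The helper below
 * resolves the regulator name either from a platform-firmware query
 * (ufs_mtk_get_vcc_num() via SMC, yielding "vcc-optN") or from the UFS
 * device's wSpecVersion (yielding "vcc-ufsN"), then populates and enables
 * the regulator.
 */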
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct platform_device *pdev;
	int i;
	int irq;

	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
	pdev = container_of(hba->dev, struct platform_device, dev);

	for (i = 0; i < host->mcq_nr_intr; i++) {
		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
			goto failed;
		}
		host->mcq_intr_info[i].hba = hba;
		host->mcq_intr_info[i].irq = irq;
		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
	}

	return;
failed:
	/* invalidate irq info */
	for (i = 0; i < host->mcq_nr_intr; i++)
		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;

	host->mcq_nr_intr = 0;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Return: -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	struct Scsi_Host *shost = hba->host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	ufs_mtk_init_mcq_irq(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	/* Set runtime pm delay to replace default */
	shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	return 0;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_host_params host_params;
	int ret;

	ufshcd_init_host_params(&host_params);
	host_params.hs_rx_gear = UFS_HS_G5;
	host_params.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
						 FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
						 dev_req_params->gear_tx,
						 PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
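
/*
 * VS_UNIPROPOWERDOWNCONTROL is a vendor-specific MIB attribute (note the
 * VS_ prefix) that moves the UniPro stack into or out of a low-power
 * state. The cached host->unipro_lpm flag later decides, in
 * ufs_mtk_hce_enable_notify(), whether a host reset and the longer HCE
 * enable delay are required.
 */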
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly set as non-LPM mode if the UIC command fails so
		 * that the default hba_enable_delay_us value is used for
		 * re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect a
	 * positive or negative RST_n pulse width of 1us or more.
	 *
	 * To be on the safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (err)
		return err;

	/* Check link state to make sure exit h8 success */
	ufs_mtk_wait_idle_state(hba, 5);
	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (err) {
		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
		return err;
	}
	ufshcd_set_link_active(hba);

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	if (is_mcq_enabled(hba)) {
		ufs_mtk_config_mcq(hba, false);
		ufshcd_mcq_make_queues_operational(hba);
		ufshcd_mcq_config_mac(hba, hba->nutrs);
		ufshcd_mcq_enable(hba);
	}

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}
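
/*
 * Entering LPM sequences the device rails VCCQ/VCCQ2 first and the VSx
 * supplies second; leaving LPM reverses that order. The early returns
 * below also skip LPM entirely while the device is still active or while
 * VCC remains enabled.
 */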
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait host return to idle state when auto-hibern8 off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			   enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the OFF state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci register 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci register 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide waiting time before gating reference clock and
	 * after ungating reference clock according to vendors'
	 * requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);

	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
					 struct devfreq_dev_profile *profile,
					 struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * MTK platform supports clk scaling by switching the parent of ufs_sel (mux).
 * ufs_sel feeds ufs_ck, which drives the UFS hardware directly.
 * The max and min clock rates of ufs_sel defined in dts should match the
 * rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
 * This prevents changing the rate of a pll clock that is shared between
 * modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
	return MAX_SUPP_MAC;
}
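
/*
 * MCQ operation-and-runtime registers: each queue's SQ doorbell (SQD),
 * SQ interrupt status (SQIS), CQ doorbell (CQD) and CQ interrupt status
 * (CQIS) live at a fixed per-queue stride from the base offsets programmed
 * below.
 */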
static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->stride = REG_UFS_MCQ_STRIDE;
		opr->base = hba->mmio_base + opr->offset;
	}

	return 0;
}

static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	/* fail mcq initialization if interrupt is not filled properly */
	if (!host->mcq_nr_intr) {
		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
		return -EINVAL;
	}

	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
	return 0;
}
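
/*
 * Per-queue MCQ interrupt handler: each SQ/CQ pair has its own IRQ line
 * (mapped in ufs_mtk_init_mcq_irq()); the handler acknowledges the CQ
 * interrupt status and drains completed entries for that queue only.
 */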
static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
	struct ufs_hba *hba = mcq_intr_info->hba;
	struct ufs_hw_queue *hwq;
	u32 events;
	int qid = mcq_intr_info->qid;

	hwq = &hba->uhq[qid];

	events = ufshcd_mcq_read_cqis(hba, qid);
	if (events)
		ufshcd_mcq_write_cqis(hba, events, qid);

	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
		ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}

static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;
	int ret;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		if (irq == MTK_MCQ_INVALID_IRQ) {
			dev_err(hba->dev, "invalid irq. %d\n", i);
			return -ENOPARAM;
		}

		host->mcq_intr_info[i].qid = i;
		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
				       &host->mcq_intr_info[i]);

		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");

		if (ret) {
			dev_err(hba->dev, "Cannot request irq %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;

	if (!host->mcq_set_intr) {
		/* Disable irq option register */
		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);

		if (irq) {
			ret = ufs_mtk_config_mcq_irq(hba);
			if (ret)
				return ret;
		}

		host->mcq_set_intr = true;
	}

	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);

	return 0;
}

static int ufs_mtk_config_esi(struct ufs_hba *hba)
{
	return ufs_mtk_config_mcq(hba, true);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify    = ufs_mtk_clk_scale_notify,

	.get_hba_mac         = ufs_mtk_get_hba_mac,
	.op_runtime_config   = ufs_mtk_op_runtime_config,
	.mcq_config_resource = ufs_mtk_mcq_config_resource,
	.config_esi          = ufs_mtk_config_esi,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return: zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_err(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 */
static void ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}
#endif

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove_new = ufs_mtk_remove,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

1858 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1859 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1860 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1861 MODULE_LICENSE("GPL v2");
1863 module_platform_driver(ufs_mtk_pltform);