GNU Linux-libre 6.9.1-gnu
drivers/ufs/host/ufs-mediatek.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *      Stanley Chu <stanley.chu@mediatek.com>
 *      Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS

#define MAX_SUPP_MAC 64
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
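/*
 * A worked example of the MCQ_QUEUE_OFFSET() macro above: it takes
 * bits [23:16] of the MCQ capability value as the queue-offset field
 * and scales it by the 0x200-byte stride of each queue's register
 * window, so a capability value of 0x00020000 yields
 * ((0x00020000 >> 16) & 0xFF) * 0x200 = 0x400.
 */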

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
        { .wmanufacturerid = UFS_ANY_VENDOR,
          .model = UFS_ANY_MODEL,
          .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
        { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
          .model = "H9HQ21AFAMZDAR",
          .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
        {}
};

static const struct of_device_id ufs_mtk_of_match[] = {
        { .compatible = "mediatek,mt8183-ufshci" },
        {},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
        "PHY Adapter Layer",
        "Data Link Layer",
        "Network Link Layer",
        "Transport Link Layer",
        "DME"
};

static const char *const ufs_uic_pa_err_str[] = {
        "PHY error on Lane 0",
        "PHY error on Lane 1",
        "PHY error on Lane 2",
        "PHY error on Lane 3",
        "Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
        "NAC_RECEIVED",
        "TCx_REPLAY_TIMER_EXPIRED",
        "AFCx_REQUEST_TIMER_EXPIRED",
        "FCx_PROTECTION_TIMER_EXPIRED",
        "CRC_ERROR",
        "RX_BUFFER_OVERFLOW",
        "MAX_FRAME_LENGTH_EXCEEDED",
        "WRONG_SEQUENCE_NUMBER",
        "AFC_FRAME_SYNTAX_ERROR",
        "NAC_FRAME_SYNTAX_ERROR",
        "EOF_SYNTAX_ERROR",
        "FRAME_SYNTAX_ERROR",
        "BAD_CTRL_SYMBOL_TYPE",
        "PA_INIT_ERROR",
        "PA_ERROR_IND_RECEIVED",
        "PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (enable) {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp |
                      (1 << RX_SYMBOL_CLK_GATE_EN) |
                      (1 << SYS_CLK_GATE_EN) |
                      (1 << TX_CLK_GATE_EN);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        } else {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
                              (1 << SYS_CLK_GATE_EN) |
                              (1 << TX_CLK_GATE_EN));
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        }
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_crypto_ctrl(res, 1);
        if (res.a0) {
                dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
                         __func__, res.a0);
                hba->caps &= ~UFSHCD_CAP_CRYPTO;
        }
}

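/*
 * Fully reset the host: assert the HCI, crypto and UniPro resets
 * together, then release them in the reverse order after a short
 * settling delay.
 */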
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        reset_control_assert(host->hci_reset);
        reset_control_assert(host->crypto_reset);
        reset_control_assert(host->unipro_reset);

        usleep_range(100, 110);

        reset_control_deassert(host->unipro_reset);
        reset_control_deassert(host->crypto_reset);
        reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
                                       struct reset_control **rc,
                                       char *str)
{
        *rc = devm_reset_control_get(hba->dev, str);
        if (IS_ERR(*rc)) {
                dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
                         str, PTR_ERR(*rc));
                *rc = NULL;
        }
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ufs_mtk_init_reset_control(hba, &host->hci_reset,
                                   "hci_rst");
        ufs_mtk_init_reset_control(hba, &host->unipro_reset,
                                   "unipro_rst");
        ufs_mtk_init_reset_control(hba, &host->crypto_reset,
                                   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (status == PRE_CHANGE) {
                if (host->unipro_lpm) {
                        hba->vps->hba_enable_delay_us = 0;
                } else {
                        hba->vps->hba_enable_delay_us = 600;
                        ufs_mtk_host_reset(hba);
                }

                if (hba->caps & UFSHCD_CAP_CRYPTO)
                        ufs_mtk_crypto_enable(hba);

                if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
                        ufshcd_writel(hba, 0,
                                      REG_AUTO_HIBERNATE_IDLE_TIMER);
                        hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
                        hba->ahit = 0;
                }

                /*
                 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
                 * to prevent host hang issue
                 */
                ufshcd_writel(hba,
                              ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
                              REG_UFS_XOUFS_CTRL);
        }

        return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device *dev = hba->dev;
        struct device_node *np = dev->of_node;
        int err = 0;

        host->mphy = devm_of_phy_get_by_index(dev, np, 0);

        if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver may be probed before the phy driver.
                 * In that case, return -EPROBE_DEFER so probing is
                 * retried once the phy is available.
                 */
                err = -EPROBE_DEFER;
                dev_info(dev,
                         "%s: required phy hasn't probed yet. err = %d\n",
                        __func__, err);
        } else if (IS_ERR(host->mphy)) {
                err = PTR_ERR(host->mphy);
                if (err != -ENODEV) {
                        dev_info(dev, "%s: PHY get failed %d\n", __func__,
                                 err);
                }
        }

        if (err)
                host->mphy = NULL;
        /*
         * Allow unbound mphy because not every platform needs specific
         * mphy control.
         */
        if (err == -ENODEV)
                err = 0;

        return err;
}

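/*
 * Reference-clock control below is a req/ack handshake through
 * REG_UFS_REFCLK_CTRL: write the request bit, then poll until the ack
 * bit (one bit above the request bit, hence the ">> 1" in the
 * comparison) mirrors the request, or give up after
 * REFCLK_REQ_TIMEOUT_US.
 */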
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct arm_smccc_res res;
        ktime_t timeout, time_checked;
        u32 value;

        if (host->ref_clk_enabled == on)
                return 0;

        ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

        if (on) {
                ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
        } else {
                ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
                ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
        }

        /* Wait for ack */
        timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
        do {
                time_checked = ktime_get();
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
                /* Wait until the ack bit matches the req bit */
                if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
                        goto out;

                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

        ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

        return -ETIMEDOUT;

out:
        host->ref_clk_enabled = on;
        if (on)
                ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

        ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

        return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
                                          u16 gating_us)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (hba->dev_info.clk_gating_wait_us) {
                host->ref_clk_gating_wait_us =
                        hba->dev_info.clk_gating_wait_us;
        } else {
                host->ref_clk_gating_wait_us = gating_us;
        }

        host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

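/*
 * Route the internal debug signals to REG_UFS_PROBE. IP versions 0x36
 * and later also program the B0-B3 select registers; older versions
 * take a single selector value.
 */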
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
                ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
                ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
                ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
                ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
                ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
        } else {
                ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        }
}

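/*
 * Poll the link state machine (the low 5 bits of REG_UFS_PROBE) until
 * it leaves the hibernate enter/exit range and settles back at
 * VS_HCE_BASE. ktime_get_mono_fast_ns() is used because this may run
 * in the suspend path.
 */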
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
                            unsigned long retry_ms)
{
        u64 timeout, time_checked;
        u32 val, sm;
        bool wait_idle;

        /* cannot use plain ktime_get() in suspend */
        timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

        /* wait a short time before the first state check */
        udelay(10);
        wait_idle = false;

        do {
                time_checked = ktime_get_mono_fast_ns();
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);

                sm = val & 0x1f;

                /*
                 * If the state machine is in H8 enter or H8 enter confirm,
                 * wait until it returns to the idle state.
                 */
                if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
                        wait_idle = true;
                        udelay(50);
                        continue;
                } else if (!wait_idle)
                        break;

                if (wait_idle && (sm == VS_HCE_BASE))
                        break;
        } while (time_checked < timeout);

        if (wait_idle && sm != VS_HCE_BASE)
                dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
                                   unsigned long max_wait_ms)
{
        ktime_t timeout, time_checked;
        u32 val;

        timeout = ktime_add_ms(ktime_get(), max_wait_ms);
        do {
                time_checked = ktime_get();
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);
                val = val >> 28;

                if (val == state)
                        return 0;

                /* Sleep for max. 200us */
                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct phy *mphy = host->mphy;
        struct arm_smccc_res res;
        int ret = 0;

        if (!mphy || !(on ^ host->mphy_powered_on))
                return 0;

        if (on) {
                if (ufs_mtk_is_va09_supported(hba)) {
                        ret = regulator_enable(host->reg_va09);
                        if (ret < 0)
                                goto out;
                        /* wait 200 us to stabilize VA09 */
                        usleep_range(200, 210);
                        ufs_mtk_va09_pwr_ctrl(res, 1);
                }
                phy_power_on(mphy);
        } else {
                phy_power_off(mphy);
                if (ufs_mtk_is_va09_supported(hba)) {
                        ufs_mtk_va09_pwr_ctrl(res, 0);
                        ret = regulator_disable(host->reg_va09);
                }
        }
out:
        if (ret) {
                dev_info(hba->dev,
                         "failed to %s va09: %d\n",
                         on ? "enable" : "disable",
                         ret);
        } else {
                host->mphy_powered_on = on;
        }

        return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
                                struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk))
                err = PTR_ERR(clk);
        else
                *clk_out = clk;

        return err;
}

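/*
 * Boost sequence: raise the vcore floor first, then re-parent the
 * crypto mux to the performance clock. Unboost reverses the order,
 * presumably so the vcore never drops below what the active clock
 * rate requires.
 */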
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct regulator *reg;
        int volt, ret;

        if (!ufs_mtk_is_boost_crypt_enabled(hba))
                return;

        cfg = host->crypt;
        volt = cfg->vcore_volt;
        reg = cfg->reg_vcore;

        ret = clk_prepare_enable(cfg->clk_crypt_mux);
        if (ret) {
                dev_info(hba->dev, "clk_prepare_enable(): %d\n",
                         ret);
                return;
        }

        if (boost) {
                ret = regulator_set_voltage(reg, volt, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to %d\n", volt);
                        goto out;
                }

                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_perf);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_perf\n");
                        regulator_set_voltage(reg, 0, INT_MAX);
                        goto out;
                }
        } else {
                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_lp);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_lp\n");
                        goto out;
                }

                ret = regulator_set_voltage(reg, 0, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to MIN\n");
                }
        }
out:
        clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
                                 struct clk **clk)
{
        int ret;

        ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
        if (ret) {
                dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
                         name, ret);
        }

        return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct device *dev = hba->dev;
        struct regulator *reg;
        u32 volt;

        host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
                                   GFP_KERNEL);
        if (!host->crypt)
                goto disable_caps;

        reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
        if (IS_ERR(reg)) {
                dev_info(dev, "failed to get dvfsrc-vcore: %ld",
                         PTR_ERR(reg));
                goto disable_caps;
        }

        if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
                                 &volt)) {
                dev_info(dev, "failed to get boost-crypt-vcore-min");
                goto disable_caps;
        }

        cfg = host->crypt;
        if (ufs_mtk_init_host_clk(hba, "crypt_mux",
                                  &cfg->clk_crypt_mux))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_lp",
                                  &cfg->clk_crypt_lp))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_perf",
                                  &cfg->clk_crypt_perf))
                goto disable_caps;

        cfg->reg_vcore = reg;
        cfg->vcore_volt = volt;
        host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
        return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        host->reg_va09 = regulator_get(hba->dev, "va09");
        if (IS_ERR(host->reg_va09))
                dev_info(hba->dev, "failed to get va09");
        else
                host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device_node *np = hba->dev->of_node;

        if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
                ufs_mtk_init_boost_crypt(hba);

        if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
                ufs_mtk_init_va09_pwr_ctrl(hba);

        if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
                host->caps |= UFS_MTK_CAP_DISABLE_AH8;

        if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
                host->caps |= UFS_MTK_CAP_BROKEN_VCC;

        if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
                host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

        dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
        ufs_mtk_boost_crypt(hba, scale_up);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (on) {
                phy_power_on(host->mphy);
                ufs_mtk_setup_ref_clk(hba, on);
                if (!ufshcd_is_clkscaling_supported(hba))
                        ufs_mtk_scale_perf(hba, on);
        } else {
                if (!ufshcd_is_clkscaling_supported(hba))
                        ufs_mtk_scale_perf(hba, on);
                ufs_mtk_setup_ref_clk(hba, on);
                phy_power_off(host->mphy);
        }
}

static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        u32 irq, i;

        if (!is_mcq_enabled(hba))
                return;

        if (host->mcq_nr_intr == 0)
                return;

        for (i = 0; i < host->mcq_nr_intr; i++) {
                irq = host->mcq_intr_info[i].irq;
                disable_irq(irq);
        }
        host->is_mcq_intr_enabled = false;
}

static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        u32 irq, i;

        if (!is_mcq_enabled(hba))
                return;

        if (host->mcq_nr_intr == 0)
                return;

        if (host->is_mcq_intr_enabled)
                return;

        for (i = 0; i < host->mcq_nr_intr; i++) {
                irq = host->mcq_intr_info[i].irq;
                enable_irq(irq);
        }
        host->is_mcq_intr_enabled = true;
}

/**
 * ufs_mtk_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, otherwise disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Return: 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        bool clk_pwr_off = false;
        int ret = 0;

        /*
         * If ufs_mtk_init() has not been done yet, simply ignore this
         * call; ufs_mtk_setup_clocks() will be invoked again from
         * ufs_mtk_init() once initialization is done.
         */
        if (!host)
                return 0;

        if (!on && status == PRE_CHANGE) {
                if (ufshcd_is_link_off(hba)) {
                        clk_pwr_off = true;
                } else if (ufshcd_is_link_hibern8(hba) ||
                         (!ufshcd_can_hibern8_during_gating(hba) &&
                         ufshcd_is_auto_hibern8_enabled(hba))) {
                        /*
                         * Gate the ref-clk and power off the mphy if the
                         * link state is OFF, or in Hibern8 via either
                         * Auto-Hibern8 or ufshcd_link_state_transition().
                         */
                        ret = ufs_mtk_wait_link_state(hba,
                                                      VS_LINK_HIBERN8,
                                                      15);
                        if (!ret)
                                clk_pwr_off = true;
                }

                if (clk_pwr_off)
                        ufs_mtk_pwr_ctrl(hba, false);
                ufs_mtk_mcq_disable_irq(hba);
        } else if (on && status == POST_CHANGE) {
                ufs_mtk_pwr_ctrl(hba, true);
                ufs_mtk_mcq_enable_irq(hba);
        }

        return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int ret, ver = 0;

        if (host->hw_ver.major)
                return;

        /* Set default (minimum) version anyway */
        host->hw_ver.major = 2;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
        if (!ret) {
                if (ver >= UFS_UNIPRO_VER_1_8) {
                        host->hw_ver.major = 3;
                        /*
                         * Fix HCI version for some platforms with
                         * incorrect version
                         */
                        if (hba->ufs_version < ufshci_version(3, 0))
                                hba->ufs_version = ufshci_version(3, 0);
                }
        }
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
        return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct list_head *head = &hba->clk_list_head;
        struct ufs_mtk_clk *mclk = &host->mclk;
        struct ufs_clk_info *clki, *clki_tmp;
        /*
         * Find private clocks and store them in struct ufs_mtk_clk.
         * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list
         * to avoid their being switched on/off in clock gating.
         */
        list_for_each_entry_safe(clki, clki_tmp, head, list) {
                if (!strcmp(clki->name, "ufs_sel")) {
                        host->mclk.ufs_sel_clki = clki;
                } else if (!strcmp(clki->name, "ufs_sel_max_src")) {
                        host->mclk.ufs_sel_max_clki = clki;
                        clk_disable_unprepare(clki->clk);
                        list_del(&clki->list);
                } else if (!strcmp(clki->name, "ufs_sel_min_src")) {
                        host->mclk.ufs_sel_min_clki = clki;
                        clk_disable_unprepare(clki->clk);
                        list_del(&clki->list);
                }
        }

        if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
            !mclk->ufs_sel_min_clki) {
                hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
                dev_info(hba->dev,
                         "%s: Clk-scaling not ready. Feature disabled.",
                         __func__);
        }
}

#define MAX_VCC_NAME 30
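/*
 * Some boards name the VCC supply by a SiP/firmware-reported option
 * number ("vcc-optN") or by the UFS spec major version ("vcc-ufsN").
 * Resolve the right name below, then populate and enable the regulator.
 */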
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
        struct ufs_vreg_info *info = &hba->vreg_info;
        struct device_node *np = hba->dev->of_node;
        struct device *dev = hba->dev;
        char vcc_name[MAX_VCC_NAME];
        struct arm_smccc_res res;
        int err, ver;

        if (hba->vreg_info.vcc)
                return 0;

        if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
                ufs_mtk_get_vcc_num(res);
                if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
                        snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
                else
                        return -ENODEV;
        } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
                ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
                snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
        } else {
                return 0;
        }

        err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
        if (err)
                return err;

        err = ufshcd_get_vreg(dev, info->vcc);
        if (err)
                return err;

        err = regulator_enable(info->vcc->reg);
        if (!err) {
                info->vcc->enabled = true;
                dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
        }

        return err;
}

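/*
 * UFS 3.x devices draw from VCCQ while earlier devices use VCCQ2: keep
 * the rail the device actually needs always on, and release the other.
 */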
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
        struct ufs_vreg_info *info = &hba->vreg_info;
        struct ufs_vreg **vreg_on, **vreg_off;

        if (hba->dev_info.wspecversion >= 0x0300) {
                vreg_on = &info->vccq;
                vreg_off = &info->vccq2;
        } else {
                vreg_on = &info->vccq2;
                vreg_off = &info->vccq;
        }

        if (*vreg_on)
                (*vreg_on)->always_on = true;

        if (*vreg_off) {
                regulator_disable((*vreg_off)->reg);
                devm_kfree(hba->dev, (*vreg_off)->name);
                devm_kfree(hba->dev, *vreg_off);
                *vreg_off = NULL;
        }
}

static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct platform_device *pdev;
        int i;
        int irq;

        host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
        pdev = container_of(hba->dev, struct platform_device, dev);

        for (i = 0; i < host->mcq_nr_intr; i++) {
                /* IRQ index 0 is the legacy IRQ; SQ/CQ IRQs start from index 1 */
                irq = platform_get_irq(pdev, i + 1);
                if (irq < 0) {
                        host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
                        goto failed;
                }
                host->mcq_intr_info[i].hba = hba;
                host->mcq_intr_info[i].irq = irq;
                dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
        }

        return;
failed:
        /* invalidate irq info */
        for (i = 0; i < host->mcq_nr_intr; i++)
                host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;

        host->mcq_nr_intr = 0;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Return: -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
        const struct of_device_id *id;
        struct device *dev = hba->dev;
        struct ufs_mtk_host *host;
        struct Scsi_Host *shost = hba->host;
        int err = 0;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        id = of_match_device(ufs_mtk_of_match, dev);
        if (!id) {
                err = -EINVAL;
                goto out;
        }

        /* Initialize host capability */
        ufs_mtk_init_host_caps(hba);

        ufs_mtk_init_mcq_irq(hba);

        err = ufs_mtk_bind_mphy(hba);
        if (err)
                goto out_variant_clear;

        ufs_mtk_init_reset(hba);

        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        /* Enable clock-gating */
        hba->caps |= UFSHCD_CAP_CLK_GATING;

        /* Enable inline encryption */
        hba->caps |= UFSHCD_CAP_CRYPTO;

        /* Enable WriteBooster */
        hba->caps |= UFSHCD_CAP_WB_EN;

        /* Enable clk scaling */
        hba->caps |= UFSHCD_CAP_CLK_SCALING;

        /* Set runtime pm delay to replace default */
        shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;

        hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
        hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
        hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
        hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

        if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
                hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

        ufs_mtk_init_clocks(hba);

        /*
         * ufshcd_vops_init() is invoked after ufshcd_setup_clock(true)
         * in ufshcd_hba_init(), so the phy clock setup is skipped there.
         * Enable the phy clocks explicitly here.
         */
        ufs_mtk_mphy_power_on(hba, true);
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

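/*
 * Decide whether the power-mode change should go through FASTAUTO:
 * only when the capability is set, the HS rate actually changes, and
 * each direction either requests FAST mode or targets HS-G4 or above.
 */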
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        if (!ufs_mtk_is_pmc_via_fastauto(hba))
                return false;

        if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
                return false;

        if (dev_req_params->pwr_tx != FAST_MODE &&
            dev_req_params->gear_tx < UFS_HS_G4)
                return false;

        if (dev_req_params->pwr_rx != FAST_MODE &&
            dev_req_params->gear_rx < UFS_HS_G4)
                return false;

        return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_host_params host_params;
        int ret;

        ufshcd_init_host_params(&host_params);
        host_params.hs_rx_gear = UFS_HS_G5;
        host_params.hs_tx_gear = UFS_HS_G5;

        ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
        if (ret) {
                pr_info("%s: failed to determine capabilities\n",
                        __func__);
        }

        if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
                               dev_req_params->lane_tx);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
                               dev_req_params->lane_rx);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                               dev_req_params->hs_rate);

                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
                               PA_NO_ADAPT);

                ret = ufshcd_uic_change_pwr_mode(hba,
                                        FASTAUTO_MODE << 4 | FASTAUTO_MODE);

                if (ret) {
                        dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
                                __func__, ret);
                }
        }

        if (host->hw_ver.major >= 3) {
                ret = ufshcd_dme_configure_adapt(hba,
                                           dev_req_params->gear_tx,
                                           PA_INITIAL_ADAPT);
        }

        return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status stage,
                                     struct ufs_pa_layer_attr *dev_max_params,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                             dev_req_params);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
        int ret;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ret = ufshcd_dme_set(hba,
                             UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
                             lpm ? 1 : 0);
        if (!ret || !lpm) {
                /*
                 * If the UIC command failed, forcibly record non-LPM mode
                 * so that the default hba_enable_delay_us value is used
                 * when re-enabling the host.
                 */
                host->unipro_lpm = lpm;
        }

        return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
        int ret;
        u32 tmp;

        ufs_mtk_get_controller_version(hba);

        ret = ufs_mtk_unipro_set_lpm(hba, false);
        if (ret)
                return ret;

        /*
         * Setting PA_Local_TX_LCC_Enable to 0 before link startup
         * to make sure that both host and device TX LCC are disabled
         * once link startup is completed.
         */
        ret = ufshcd_disable_host_tx_lcc(hba);
        if (ret)
                return ret;

        /* disable deep stall */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
        if (ret)
                return ret;

        tmp &= ~(1 << 6);

        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

        return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
        u32 ah_ms;

        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
                ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
        }
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);

        /* will be configured during probe hba */
        if (ufshcd_is_auto_hibern8_supported(hba))
                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

        ufs_mtk_setup_clk_gating(hba);
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status stage)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_link(hba);
                break;
        case POST_CHANGE:
                ufs_mtk_post_link(hba);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        /* disable hba before device reset */
        ufshcd_hba_stop(hba);

        ufs_mtk_device_reset_ctrl(0, res);

        /*
         * The reset signal is active low. UFS devices shall detect
         * more than or equal to 1us of positive or negative RST_n
         * pulse width.
         *
         * To be on safe side, keep the reset low for at least 10us.
         */
        usleep_range(10, 15);

        ufs_mtk_device_reset_ctrl(1, res);

        /* Some devices may need time to respond to rst_n */
        usleep_range(10000, 15000);

        dev_info(hba->dev, "device reset done\n");

        return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_hba_enable(hba);
        if (err)
                return err;

        err = ufs_mtk_unipro_set_lpm(hba, false);
        if (err)
                return err;

        err = ufshcd_uic_hibern8_exit(hba);
        if (err)
                return err;

        /* Check the link state to make sure H8 exit succeeded */
        ufs_mtk_wait_idle_state(hba, 5);
        err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
        if (err) {
                dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
                return err;
        }
        ufshcd_set_link_active(hba);

        err = ufshcd_make_hba_operational(hba);
        if (err)
                return err;

        if (is_mcq_enabled(hba)) {
                ufs_mtk_config_mcq(hba, false);
                ufshcd_mcq_make_queues_operational(hba);
                ufshcd_mcq_config_mac(hba, hba->nutrs);
                ufshcd_mcq_enable(hba);
        }

        return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
        int err;

        /* Disable reset confirm feature by UniPro */
        ufshcd_writel(hba,
                      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
                      REG_UFS_XOUFS_CTRL);

        err = ufs_mtk_unipro_set_lpm(hba, true);
        if (err) {
                /* Restore the UniPro state for subsequent error recovery */
                ufs_mtk_unipro_set_lpm(hba, false);
                return err;
        }

        return 0;
}

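/*
 * Put whichever VCCQ rail the board provides into regulator idle mode
 * for LPM, and restore normal mode on the way out.
 */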
static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
        struct ufs_vreg *vccqx = NULL;

        if (hba->vreg_info.vccq)
                vccqx = hba->vreg_info.vccq;
        else
                vccqx = hba->vreg_info.vccq2;

        regulator_set_mode(vccqx->reg,
                           lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
        struct arm_smccc_res res;

        ufs_mtk_device_pwr_ctrl(!lpm,
                                (unsigned long)hba->dev_info.wspecversion,
                                res);
}

static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
        if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
                return;

        /* Skip if VCC is assumed always-on */
        if (!hba->vreg_info.vcc)
                return;

        /* Bypass LPM when device is still active */
        if (lpm && ufshcd_is_ufs_dev_active(hba))
                return;

        /* Bypass LPM if VCC is enabled */
        if (lpm && hba->vreg_info.vcc->enabled)
                return;

        if (lpm) {
                ufs_mtk_vccqx_set_lpm(hba, lpm);
                ufs_mtk_vsx_set_lpm(hba, lpm);
        } else {
                ufs_mtk_vsx_set_lpm(hba, lpm);
                ufs_mtk_vccqx_set_lpm(hba, lpm);
        }
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
        int ret;

        /* disable auto-hibern8 */
        ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

        /* wait for the host to return to idle state after auto-hibern8 is off */
        ufs_mtk_wait_idle_state(hba, 5);

        ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
        if (ret)
                dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
        enum ufs_notify_change_status status)
{
        int err;
        struct arm_smccc_res res;

        if (status == PRE_CHANGE) {
                if (ufshcd_is_auto_hibern8_supported(hba))
                        ufs_mtk_auto_hibern8_disable(hba);
                return 0;
        }

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
                if (err)
                        goto fail;
        }

        if (!ufshcd_is_link_active(hba)) {
                /*
                 * Make sure no error will be returned to prevent
                 * ufshcd_suspend() re-enabling regulators while vreg is still
                 * in low-power mode.
                 */
                err = ufs_mtk_mphy_power_on(hba, false);
                if (err)
                        goto fail;
        }

        if (ufshcd_is_link_off(hba))
                ufs_mtk_device_reset_ctrl(0, res);

        ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

        return 0;
fail:
        /*
         * Forcibly set the link to the off state to trigger
         * ufshcd_host_reset_and_restore() in ufshcd_suspend()
         * for a complete host reset.
         */
        ufshcd_set_link_off(hba);
        return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;
        struct arm_smccc_res res;

        if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
                ufs_mtk_dev_vreg_set_lpm(hba, false);

        ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

        err = ufs_mtk_mphy_power_on(hba, true);
        if (err)
                goto fail;

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_hpm(hba);
                if (err)
                        goto fail;
        }

        return 0;
fail:
        return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        /* Dump ufshci register 0x140 ~ 0x14C */
        ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
                         "XOUFS Ctrl (0x140): ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        /* Dump ufshci register 0x2200 ~ 0x22AC */
        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl (0x2200): ");

        /* Direct debugging information to REG_MTK_PROBE */
        ufs_mtk_dbg_sel(hba);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        if (mid == UFS_VENDOR_SAMSUNG) {
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
        }

        /*
         * Decide waiting time before gating reference clock and
         * after ungating reference clock according to vendors'
         * requirements.
         */
        if (mid == UFS_VENDOR_SAMSUNG)
                ufs_mtk_setup_ref_clk_wait_us(hba, 1);
        else if (mid == UFS_VENDOR_SKHYNIX)
                ufs_mtk_setup_ref_clk_wait_us(hba, 30);
        else if (mid == UFS_VENDOR_TOSHIBA)
                ufs_mtk_setup_ref_clk_wait_us(hba, 100);
        else
                ufs_mtk_setup_ref_clk_wait_us(hba,
                                              REFCLK_DEFAULT_WAIT_US);
        return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

        if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
            (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
                hba->vreg_info.vcc->always_on = true;
                /*
                 * VCC will be kept always-on thus we don't
                 * need any delay during regulator operations
                 */
                hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
                        UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
        }

        ufs_mtk_vreg_fix_vcc(hba);
        ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
                                 enum ufs_event_type evt, void *data)
{
        unsigned int val = *(u32 *)data;
        unsigned long reg;
        u8 bit;

        trace_ufs_mtk_event(evt, val);

        /* Print details of UIC Errors */
        if (evt <= UFS_EVT_DME_ERR) {
                dev_info(hba->dev,
                         "Host UIC Error Code (%s): %08x\n",
                         ufs_uic_err_str[evt], val);
                reg = val;
        }

        if (evt == UFS_EVT_PA_ERR) {
                for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
                        dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
        }

        if (evt == UFS_EVT_DL_ERR) {
                for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
                        dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
        }
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
                                struct devfreq_dev_profile *profile,
                                struct devfreq_simple_ondemand_data *data)
{
        /* Customize min gear in clk scaling */
        hba->clk_scaling.min_gear = UFS_HS_G4;

        hba->vps->devfreq_profile.polling_ms = 200;
        hba->vps->ondemand_data.upthreshold = 50;
        hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * The MTK platform supports clk scaling by switching the parent of
 * ufs_sel (a mux). ufs_sel feeds ufs_ck, which drives the UFS hardware
 * directly. The max and min clock rates of ufs_sel defined in the dts
 * should match the rates of "ufs_sel_max_src" and "ufs_sel_min_src"
 * respectively. This prevents changing the rate of a PLL clock that is
 * shared between modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_clk *mclk = &host->mclk;
        struct ufs_clk_info *clki = mclk->ufs_sel_clki;
        int ret = 0;

        ret = clk_prepare_enable(clki->clk);
        if (ret) {
                dev_info(hba->dev,
                         "clk_prepare_enable() fail, ret: %d\n", ret);
                return;
        }

        if (scale_up) {
                ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
                clki->curr_freq = clki->max_freq;
        } else {
                ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
                clki->curr_freq = clki->min_freq;
        }

        if (ret) {
                dev_info(hba->dev,
                         "Failed to set ufs_sel_clki, ret: %d\n", ret);
        }

        clk_disable_unprepare(clki->clk);

        trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
                                    enum ufs_notify_change_status status)
{
        if (!ufshcd_is_clkscaling_supported(hba))
                return 0;

        if (status == PRE_CHANGE) {
                /* Switch parent before clk_set_rate() */
                ufs_mtk_clk_scale(hba, scale_up);
        } else {
                /* Request interrupt latency QoS accordingly */
                ufs_mtk_scale_perf(hba, scale_up);
        }

        return 0;
}

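/* Report the maximum number of active commands (MAC) the host supports */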
static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
        return MAX_SUPP_MAC;
}

static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
        struct ufshcd_mcq_opr_info_t *opr;
        int i;

        hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
        hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
        hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
        hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;

        for (i = 0; i < OPR_MAX; i++) {
                opr = &hba->mcq_opr[i];
                opr->stride = REG_UFS_MCQ_STRIDE;
                opr->base = hba->mmio_base + opr->offset;
        }

        return 0;
}

static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

1608         /* Fail MCQ initialization if the MCQ interrupts were not populated */
1609         if (!host->mcq_nr_intr) {
1610                 dev_info(hba->dev, "IRQs not ready. MCQ disabled.\n");
1611                 return -EINVAL;
1612         }
1613
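        /* The MCQ queue region starts at QCFGPTR (512-byte units) from the MMIO base */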
1614         hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
1615         return 0;
1616 }
1617
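/*
 * Per-queue MCQ interrupt handler: acknowledge pending completion-queue
 * events, then reap completed entries from the matching hardware queue.
 */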
1618 static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
1619 {
1620         struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
1621         struct ufs_hba *hba = mcq_intr_info->hba;
1622         struct ufs_hw_queue *hwq;
1623         u32 events;
1624         int qid = mcq_intr_info->qid;
1625
1626         hwq = &hba->uhq[qid];
1627
1628         events = ufshcd_mcq_read_cqis(hba, qid);
1629         if (events)
1630                 ufshcd_mcq_write_cqis(hba, events, qid);
1631
1632         if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
1633                 ufshcd_mcq_poll_cqe_lock(hba, hwq);
1634
1635         return IRQ_HANDLED;
1636 }
1637
1638 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
1639 {
1640         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1641         u32 irq, i;
1642         int ret;
1643
1644         for (i = 0; i < host->mcq_nr_intr; i++) {
1645                 irq = host->mcq_intr_info[i].irq;
1646                 if (irq == MTK_MCQ_INVALID_IRQ) {
1647                         dev_err(hba->dev, "invalid irq for MCQ queue %d\n", i);
1648                         return -ENOPARAM;
1649                 }
1650
1651                 host->mcq_intr_info[i].qid = i;
1652                 ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
1653                                        &host->mcq_intr_info[i]);
1654
1655                 dev_dbg(hba->dev, "request irq %d %s\n", irq, ret ? "failed" : "succeeded");
1656
1657                 if (ret) {
1658                         dev_err(hba->dev, "Cannot request irq %d, ret: %d\n", irq, ret);
1659                         return ret;
1660                 }
1661         }
1662
1663         return 0;
1664 }
1665
1666 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
1667 {
1668         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1669         int ret = 0;
1670
1671         if (!host->mcq_set_intr) {
1672                 /* Disable MCQ interrupts until the IRQ handlers are registered */
1673                 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
1674
1675                 if (irq) {
1676                         ret = ufs_mtk_config_mcq_irq(hba);
1677                         if (ret)
1678                                 return ret;
1679                 }
1680
1681                 host->mcq_set_intr = true;
1682         }
1683
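        /* Enable the MCQ/Auto-Hibern8 option and per-queue (multi) interrupts */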
1684         ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
1685         ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
1686
1687         return 0;
1688 }
1689
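/* ESI (Event Specific Interrupt) setup simply reapplies the MCQ IRQ config */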
1690 static int ufs_mtk_config_esi(struct ufs_hba *hba)
1691 {
1692         return ufs_mtk_config_mcq(hba, true);
1693 }
1694
1695 /*
1696  * ufs_hba_mtk_vops - UFS MTK specific variant operations
1697  *
1698  * These variant operations configure the necessary controller and PHY
1699  * handshake during initialization.
1700  */
1701 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1702         .name                = "mediatek.ufshci",
1703         .init                = ufs_mtk_init,
1704         .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1705         .setup_clocks        = ufs_mtk_setup_clocks,
1706         .hce_enable_notify   = ufs_mtk_hce_enable_notify,
1707         .link_startup_notify = ufs_mtk_link_startup_notify,
1708         .pwr_change_notify   = ufs_mtk_pwr_change_notify,
1709         .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1710         .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1711         .suspend             = ufs_mtk_suspend,
1712         .resume              = ufs_mtk_resume,
1713         .dbg_register_dump   = ufs_mtk_dbg_register_dump,
1714         .device_reset        = ufs_mtk_device_reset,
1715         .event_notify        = ufs_mtk_event_notify,
1716         .config_scaling_param = ufs_mtk_config_scaling_param,
1717         .clk_scale_notify    = ufs_mtk_clk_scale_notify,
1718         /* mcq vops */
1719         .get_hba_mac         = ufs_mtk_get_hba_mac,
1720         .op_runtime_config   = ufs_mtk_op_runtime_config,
1721         .mcq_config_resource = ufs_mtk_mcq_config_resource,
1722         .config_esi          = ufs_mtk_config_esi,
1723 };
1724
1725 /**
1726  * ufs_mtk_probe - probe routine of the driver
1727  * @pdev: pointer to platform device handle
1728  *
1729  * Return: zero for success and non-zero for failure.
1730  */
1731 static int ufs_mtk_probe(struct platform_device *pdev)
1732 {
1733         int err;
1734         struct device *dev = &pdev->dev;
1735         struct device_node *reset_node;
1736         struct platform_device *reset_pdev;
1737         struct device_link *link;
1738
1739         reset_node = of_find_compatible_node(NULL, NULL,
1740                                              "ti,syscon-reset");
1741         if (!reset_node) {
1742                 dev_notice(dev, "ti,syscon-reset node not found\n");
1743                 goto skip_reset;
1744         }
1745         reset_pdev = of_find_device_by_node(reset_node);
1746         if (!reset_pdev) {
1747                 dev_notice(dev, "reset device not found\n");
1748                 goto skip_reset;
1749         }
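        /*
         * Register this device as a consumer of the reset controller;
         * with DL_FLAG_AUTOPROBE_CONSUMER the driver core probes the
         * consumer once the supplier has bound.
         */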
1750         link = device_link_add(dev, &reset_pdev->dev,
1751                 DL_FLAG_AUTOPROBE_CONSUMER);
1752         put_device(&reset_pdev->dev);
1753         if (!link) {
1754                 dev_notice(dev, "failed to add reset device_link\n");
1755                 goto skip_reset;
1756         }
1757         /* supplier has not been probed yet; retry after it binds */
1758         if (link->status == DL_STATE_DORMANT) {
1759                 err = -EPROBE_DEFER;
1760                 goto out;
1761         }
1762
1763 skip_reset:
1764         /* perform generic probe */
1765         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1766
1767 out:
1768         if (err)
1769                 dev_err(dev, "probe failed %d\n", err);
1770
1771         of_node_put(reset_node);
1772         return err;
1773 }
1774
1775 /**
1776  * ufs_mtk_remove - remove routine of the driver
1777  * @pdev: pointer to platform device handle
1778  *
1779  * Resume the device and tear down the UFS host controller.
1780  */
1781 static void ufs_mtk_remove(struct platform_device *pdev)
1782 {
1783         struct ufs_hba *hba = platform_get_drvdata(pdev);
1784
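        /* Wake the device first so the host controller can be torn down safely */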
1785         pm_runtime_get_sync(&pdev->dev);
1786         ufshcd_remove(hba);
1787 }
1788
1789 #ifdef CONFIG_PM_SLEEP
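/*
 * System PM: put the device supplies into low-power mode only after
 * ufshcd has suspended, and take them out of it before ufshcd resumes.
 * The runtime PM callbacks below follow the same ordering.
 */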
1790 static int ufs_mtk_system_suspend(struct device *dev)
1791 {
1792         struct ufs_hba *hba = dev_get_drvdata(dev);
1793         int ret;
1794
1795         ret = ufshcd_system_suspend(dev);
1796         if (ret)
1797                 return ret;
1798
1799         ufs_mtk_dev_vreg_set_lpm(hba, true);
1800
1801         return 0;
1802 }
1803
1804 static int ufs_mtk_system_resume(struct device *dev)
1805 {
1806         struct ufs_hba *hba = dev_get_drvdata(dev);
1807
1808         ufs_mtk_dev_vreg_set_lpm(hba, false);
1809
1810         return ufshcd_system_resume(dev);
1811 }
1812 #endif
1813
1814 #ifdef CONFIG_PM
1815 static int ufs_mtk_runtime_suspend(struct device *dev)
1816 {
1817         struct ufs_hba *hba = dev_get_drvdata(dev);
1818         int ret = 0;
1819
1820         ret = ufshcd_runtime_suspend(dev);
1821         if (ret)
1822                 return ret;
1823
1824         ufs_mtk_dev_vreg_set_lpm(hba, true);
1825
1826         return 0;
1827 }
1828
1829 static int ufs_mtk_runtime_resume(struct device *dev)
1830 {
1831         struct ufs_hba *hba = dev_get_drvdata(dev);
1832
1833         ufs_mtk_dev_vreg_set_lpm(hba, false);
1834
1835         return ufshcd_runtime_resume(dev);
1836 }
1837 #endif
1838
1839 static const struct dev_pm_ops ufs_mtk_pm_ops = {
1840         SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
1841                                 ufs_mtk_system_resume)
1842         SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
1843                            ufs_mtk_runtime_resume, NULL)
1844         .prepare         = ufshcd_suspend_prepare,
1845         .complete        = ufshcd_resume_complete,
1846 };
1847
1848 static struct platform_driver ufs_mtk_pltfrm = {
1849         .probe      = ufs_mtk_probe,
1850         .remove_new = ufs_mtk_remove,
1851         .driver = {
1852                 .name   = "ufshcd-mtk",
1853                 .pm     = &ufs_mtk_pm_ops,
1854                 .of_match_table = ufs_mtk_of_match,
1855         },
1856 };
1857
1858 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1859 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1860 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1861 MODULE_LICENSE("GPL v2");
1862
1863 module_platform_driver(ufs_mtk_pltfrm);