GNU Linux-libre 4.9.315-gnu1
[releases.git] / drivers/scsi/ufs/ufs-qcom.c
1 /*
2  * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include <linux/platform_device.h>
18 #include <linux/phy/phy.h>
19 #include <linux/phy/phy-qcom-ufs.h>
20
21 #include "ufshcd.h"
22 #include "ufshcd-pltfrm.h"
23 #include "unipro.h"
24 #include "ufs-qcom.h"
25 #include "ufshci.h"
26 #include "ufs_quirks.h"
27 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
28         (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
29
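/*
 * Test bus "major" select values used as host->testbus.select_major; see
 * ufs_qcom_testbus_cfg_is_ok() and ufs_qcom_testbus_config() below.
 */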
30 enum {
31         TSTBUS_UAWM,
32         TSTBUS_UARM,
33         TSTBUS_TXUC,
34         TSTBUS_RXUC,
35         TSTBUS_DFC,
36         TSTBUS_TRLUT,
37         TSTBUS_TMRLUT,
38         TSTBUS_OCSC,
39         TSTBUS_UTP_HCI,
40         TSTBUS_COMBINED,
41         TSTBUS_WRAPPER,
42         TSTBUS_UNIPRO,
43         TSTBUS_MAX,
44 };
45
46 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
47
48 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
49 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
50 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
51                                                        u32 clk_cycles);
52
53 static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
54                 char *prefix)
55 {
56         print_hex_dump(KERN_ERR, prefix,
57                         len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
58                         16, 4, (void __force *)hba->mmio_base + offset,
59                         len * 4, false);
60 }
61
62 static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
63                 char *prefix, void *priv)
64 {
65         ufs_qcom_dump_regs(hba, offset, len, prefix);
66 }
67
68 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
69 {
70         int err = 0;
71
72         err = ufshcd_dme_get(hba,
73                         UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
74         if (err)
75                 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
76                                 __func__, err);
77
78         return err;
79 }
80
81 static int ufs_qcom_host_clk_get(struct device *dev,
82                 const char *name, struct clk **clk_out)
83 {
84         struct clk *clk;
85         int err = 0;
86
87         clk = devm_clk_get(dev, name);
88         if (IS_ERR(clk)) {
89                 err = PTR_ERR(clk);
90                 dev_err(dev, "%s: failed to get %s err %d\n",
91                                 __func__, name, err);
92         } else {
93                 *clk_out = clk;
94         }
95
96         return err;
97 }
98
99 static int ufs_qcom_host_clk_enable(struct device *dev,
100                 const char *name, struct clk *clk)
101 {
102         int err = 0;
103
104         err = clk_prepare_enable(clk);
105         if (err)
106                 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
107
108         return err;
109 }
110
111 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
112 {
113         if (!host->is_lane_clks_enabled)
114                 return;
115
116         if (host->hba->lanes_per_direction > 1)
117                 clk_disable_unprepare(host->tx_l1_sync_clk);
118         clk_disable_unprepare(host->tx_l0_sync_clk);
119         if (host->hba->lanes_per_direction > 1)
120                 clk_disable_unprepare(host->rx_l1_sync_clk);
121         clk_disable_unprepare(host->rx_l0_sync_clk);
122
123         host->is_lane_clks_enabled = false;
124 }
125
126 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
127 {
128         int err = 0;
129         struct device *dev = host->hba->dev;
130
131         if (host->is_lane_clks_enabled)
132                 return 0;
133
134         err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
135                 host->rx_l0_sync_clk);
136         if (err)
137                 goto out;
138
139         err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
140                 host->tx_l0_sync_clk);
141         if (err)
142                 goto disable_rx_l0;
143
144         if (host->hba->lanes_per_direction > 1) {
145                 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
146                         host->rx_l1_sync_clk);
147                 if (err)
148                         goto disable_tx_l0;
149
150                 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
151                         host->tx_l1_sync_clk);
152                 if (err)
153                         goto disable_rx_l1;
154         }
155
156         host->is_lane_clks_enabled = true;
157         goto out;
158
159 disable_rx_l1:
160         if (host->hba->lanes_per_direction > 1)
161                 clk_disable_unprepare(host->rx_l1_sync_clk);
162 disable_tx_l0:
163         clk_disable_unprepare(host->tx_l0_sync_clk);
164 disable_rx_l0:
165         clk_disable_unprepare(host->rx_l0_sync_clk);
166 out:
167         return err;
168 }
169
170 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
171 {
172         int err = 0;
173         struct device *dev = host->hba->dev;
174
175         err = ufs_qcom_host_clk_get(dev,
176                         "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
177         if (err)
178                 goto out;
179
180         err = ufs_qcom_host_clk_get(dev,
181                         "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
182         if (err)
183                 goto out;
184
185         /* In case of single lane per direction, don't read lane1 clocks */
186         if (host->hba->lanes_per_direction > 1) {
187                 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
188                         &host->rx_l1_sync_clk);
189                 if (err)
190                         goto out;
191
192                 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
193                         &host->tx_l1_sync_clk);
194         }
195 out:
196         return err;
197 }
198
199 static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
200 {
201         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
202         struct phy *phy = host->generic_phy;
203         u32 tx_lanes;
204         int err = 0;
205
206         err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
207         if (err)
208                 goto out;
209
210         err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
211         if (err)
212                 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
213                         __func__);
214
215 out:
216         return err;
217 }
218
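/*
 * Poll MPHY_TX_FSM_STATE for up to HBRN8_POLL_TOUT_MS and verify that the TX
 * FSM has reached TX_FSM_HIBERN8. Returns zero on success, a negative error
 * code if the DME read fails, or the unexpected FSM state value otherwise.
 */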
219 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
220 {
221         int err;
222         u32 tx_fsm_val = 0;
223         unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
224
225         do {
226                 err = ufshcd_dme_get(hba,
227                                 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
228                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
229                                 &tx_fsm_val);
230                 if (err || tx_fsm_val == TX_FSM_HIBERN8)
231                         break;
232
233                 /* sleep for max. 200us */
234                 usleep_range(100, 200);
235         } while (time_before(jiffies, timeout));
236
237         /*
238          * We might have been scheduled out for a long time during the
239          * polling above, so check the state once more.
240          */
241         if (time_after(jiffies, timeout))
242                 err = ufshcd_dme_get(hba,
243                                 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
244                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
245                                 &tx_fsm_val);
246
247         if (err) {
248                 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
249                                 __func__, err);
250         } else if (tx_fsm_val != TX_FSM_HIBERN8) {
251                 err = tx_fsm_val;
252                 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
253                                 __func__, err);
254         }
255
256         return err;
257 }
258
259 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
260 {
261         ufshcd_rmwl(host->hba, QUNIPRO_SEL,
262                    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
263                    REG_UFS_CFG1);
264         /* make sure the above configuration is applied before we return */
265         mb();
266 }
267
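/*
 * PHY power-up sequence: assert PHY reset, calibrate the PHY for the
 * configured HS rate, de-assert reset, start the serdes, wait for the PCS to
 * become ready and finally select the UniPro core (QUnipro or legacy).
 */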
268 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
269 {
270         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
271         struct phy *phy = host->generic_phy;
272         int ret = 0;
273         bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
274                                                         ? true : false;
275
276         /* Assert PHY reset and apply PHY calibration values */
277         ufs_qcom_assert_reset(hba);
278         /* provide 1ms delay to let the reset pulse propagate */
279         usleep_range(1000, 1100);
280
281         ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
282
283         if (ret) {
284                 dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
285                         __func__, ret);
286                 goto out;
287         }
288
289         /* De-assert PHY reset and start serdes */
290         ufs_qcom_deassert_reset(hba);
291
292         /*
293          * After reset deassertion, the PHY needs its ref clocks, voltage
294          * and current to settle before the serdes is started.
295          */
296         usleep_range(1000, 1100);
297         ret = ufs_qcom_phy_start_serdes(phy);
298         if (ret) {
299                 dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
300                         __func__, ret);
301                 goto out;
302         }
303
304         ret = ufs_qcom_phy_is_pcs_ready(phy);
305         if (ret)
306                 dev_err(hba->dev,
307                         "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
308                         __func__, ret);
309
310         ufs_qcom_select_unipro_mode(host);
311
312 out:
313         return ret;
314 }
315
316 /*
317  * The UTP controller has a number of internal clock gating cells (CGCs).
318  * Internal hardware sub-modules within the UTP controller control the CGCs.
319  * Hardware CGCs disable the clock to inactivate UTP sub-modules that are not
320  * involved in a specific operation. The UTP controller CGCs are disabled by
321  * default; this function enables them (after every UFS link startup) to
322  * reduce power leakage.
323  */
324 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
325 {
326         ufshcd_writel(hba,
327                 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
328                 REG_UFS_CFG2);
329
330         /* Ensure that HW clock gating is enabled before next operations */
331         mb();
332 }
333
334 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
335                                       enum ufs_notify_change_status status)
336 {
337         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
338         int err = 0;
339
340         switch (status) {
341         case PRE_CHANGE:
342                 ufs_qcom_power_up_sequence(hba);
343                 /*
344                  * The PHY PLL output is the source of tx/rx lane symbol
345                  * clocks, hence, enable the lane clocks only after PHY
346                  * is initialized.
347                  */
348                 err = ufs_qcom_enable_lane_clks(host);
349                 break;
350         case POST_CHANGE:
351                 /* check if UFS PHY moved from DISABLED to HIBERN8 */
352                 err = ufs_qcom_check_hibern8(hba);
353                 ufs_qcom_enable_hw_clk_gating(hba);
354
355                 break;
356         default:
357                 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
358                 err = -EINVAL;
359                 break;
360         }
361         return err;
362 }
363
364 /*
365  * Returns zero for success and non-zero in case of a failure.
366  */
367 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
368                                u32 hs, u32 rate, bool update_link_startup_timer)
369 {
370         int ret = 0;
371         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
372         struct ufs_clk_info *clki;
373         u32 core_clk_period_in_ns;
374         u32 tx_clk_cycles_per_us = 0;
375         unsigned long core_clk_rate = 0;
376         u32 core_clk_cycles_per_us = 0;
377
378         static u32 pwm_fr_table[][2] = {
379                 {UFS_PWM_G1, 0x1},
380                 {UFS_PWM_G2, 0x1},
381                 {UFS_PWM_G3, 0x1},
382                 {UFS_PWM_G4, 0x1},
383         };
384
385         static u32 hs_fr_table_rA[][2] = {
386                 {UFS_HS_G1, 0x1F},
387                 {UFS_HS_G2, 0x3e},
388                 {UFS_HS_G3, 0x7D},
389         };
390
391         static u32 hs_fr_table_rB[][2] = {
392                 {UFS_HS_G1, 0x24},
393                 {UFS_HS_G2, 0x49},
394                 {UFS_HS_G3, 0x92},
395         };
396
397         /*
398          * The Qunipro controller does not use the following registers:
399          * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
400          * UFS_REG_PA_LINK_STARTUP_TIMER.
401          * However, the UTP controller uses the SYS1CLK_1US_REG register for
402          * its interrupt aggregation logic.
403          */
404         if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
405                 goto out;
406
407         if (gear == 0) {
408                 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
409                 goto out_error;
410         }
411
412         list_for_each_entry(clki, &hba->clk_list_head, list) {
413                 if (!strcmp(clki->name, "core_clk"))
414                         core_clk_rate = clk_get_rate(clki->clk);
415         }
416
417         /* If frequency is smaller than 1MHz, set to 1MHz */
418         if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
419                 core_clk_rate = DEFAULT_CLK_RATE_HZ;
420
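        /*
         * REG_UFS_SYS1CLK_1US holds the number of core clock cycles in 1us;
         * e.g. a 150 MHz core clock gives 150000000 / USEC_PER_SEC = 150.
         */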
421         core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
422         if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
423                 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
424                 /*
425                  * make sure the above write gets applied before we return
426                  * from this function.
427                  */
428                 mb();
429         }
430
431         if (ufs_qcom_cap_qunipro(host))
432                 goto out;
433
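        /*
         * Compute the core clock period in ns (e.g. NSEC_PER_SEC / 150000000
         * = 6 ns for a 150 MHz core clock, integer division) and place it in
         * the CLK_NS field via OFFSET_CLK_NS_REG and MASK_CLK_NS_REG.
         */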
434         core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
435         core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
436         core_clk_period_in_ns &= MASK_CLK_NS_REG;
437
438         switch (hs) {
439         case FASTAUTO_MODE:
440         case FAST_MODE:
441                 if (rate == PA_HS_MODE_A) {
442                         if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
443                                 dev_err(hba->dev,
444                                         "%s: index %d exceeds table size %zu\n",
445                                         __func__, gear,
446                                         ARRAY_SIZE(hs_fr_table_rA));
447                                 goto out_error;
448                         }
449                         tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
450                 } else if (rate == PA_HS_MODE_B) {
451                         if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
452                                 dev_err(hba->dev,
453                                         "%s: index %d exceeds table size %zu\n",
454                                         __func__, gear,
455                                         ARRAY_SIZE(hs_fr_table_rB));
456                                 goto out_error;
457                         }
458                         tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
459                 } else {
460                         dev_err(hba->dev, "%s: invalid rate = %d\n",
461                                 __func__, rate);
462                         goto out_error;
463                 }
464                 break;
465         case SLOWAUTO_MODE:
466         case SLOW_MODE:
467                 if (gear > ARRAY_SIZE(pwm_fr_table)) {
468                         dev_err(hba->dev,
469                                         "%s: index %d exceeds table size %zu\n",
470                                         __func__, gear,
471                                         ARRAY_SIZE(pwm_fr_table));
472                         goto out_error;
473                 }
474                 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
475                 break;
476         case UNCHANGED:
477         default:
478                 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
479                 goto out_error;
480         }
481
482         if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
483             (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
484                 /* both fields of this register shall be written at once */
485                 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
486                               REG_UFS_TX_SYMBOL_CLK_NS_US);
487                 /*
488                  * make sure the above write gets applied before we return
489                  * from this function.
490                  */
491                 mb();
492         }
493
494         if (update_link_startup_timer) {
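                /*
                 * Program the PA link startup timer with 100 ms worth of core
                 * clock cycles: (core_clk_rate / MSEC_PER_SEC) cycles per ms,
                 * multiplied by 100.
                 */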
495                 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
496                               REG_UFS_PA_LINK_STARTUP_TIMER);
497                 /*
498                  * make sure that this configuration is applied before
499                  * we return
500                  */
501                 mb();
502         }
503         goto out;
504
505 out_error:
506         ret = -EINVAL;
507 out:
508         return ret;
509 }
510
511 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
512                                         enum ufs_notify_change_status status)
513 {
514         int err = 0;
515         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
516
517         switch (status) {
518         case PRE_CHANGE:
519                 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
520                                         0, true)) {
521                         dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
522                                 __func__);
523                         err = -EINVAL;
524                         goto out;
525                 }
526
527                 if (ufs_qcom_cap_qunipro(host))
528                         /*
529                          * set unipro core clock cycles to 150 & clear clock
530                          * divider
531                          */
532                         err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
533                                                                           150);
534
535                 /*
536                  * Some UFS devices (and possibly the host) have issues if
537                  * LCC is enabled. So we set PA_Local_TX_LCC_Enable to 0
538                  * before link startup, which makes sure that both host
539                  * and device TX LCC are disabled once link startup is
540                  * completed.
541                  */
542                 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
543                         err = ufshcd_dme_set(hba,
544                                         UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
545                                         0);
546
547                 break;
548         case POST_CHANGE:
549                 ufs_qcom_link_startup_post_change(hba);
550                 break;
551         default:
552                 break;
553         }
554
555 out:
556         return err;
557 }
558
559 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
560 {
561         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
562         struct phy *phy = host->generic_phy;
563         int ret = 0;
564
565         if (ufs_qcom_is_link_off(hba)) {
566                 /*
567                  * Disable the tx/rx lane symbol clocks before PHY is
568                  * powered down as the PLL source should be disabled
569                  * after downstream clocks are disabled.
570                  */
571                 ufs_qcom_disable_lane_clks(host);
572                 phy_power_off(phy);
573
574                 /* Assert PHY soft reset */
575                 ufs_qcom_assert_reset(hba);
576                 goto out;
577         }
578
579         /*
580          * If the UniPro link is not active, the PHY ref_clk, main PHY analog
581          * power rail and the PLL low-noise analog power rail can be switched off.
582          */
583         if (!ufs_qcom_is_link_active(hba)) {
584                 ufs_qcom_disable_lane_clks(host);
585                 phy_power_off(phy);
586         }
587
588 out:
589         return ret;
590 }
591
592 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
593 {
594         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
595         struct phy *phy = host->generic_phy;
596         int err;
597
598         err = phy_power_on(phy);
599         if (err) {
600                 dev_err(hba->dev, "%s: failed to power on PHY, err = %d\n",
601                         __func__, err);
602                 goto out;
603         }
604
605         err = ufs_qcom_enable_lane_clks(host);
606         if (err)
607                 goto out;
608
609         hba->is_sys_suspended = false;
610
611 out:
612         return err;
613 }
614
615 struct ufs_qcom_dev_params {
616         u32 pwm_rx_gear;        /* pwm rx gear to work in */
617         u32 pwm_tx_gear;        /* pwm tx gear to work in */
618         u32 hs_rx_gear;         /* hs rx gear to work in */
619         u32 hs_tx_gear;         /* hs tx gear to work in */
620         u32 rx_lanes;           /* number of rx lanes */
621         u32 tx_lanes;           /* number of tx lanes */
622         u32 rx_pwr_pwm;         /* rx pwm working pwr */
623         u32 tx_pwr_pwm;         /* tx pwm working pwr */
624         u32 rx_pwr_hs;          /* rx hs working pwr */
625         u32 tx_pwr_hs;          /* tx hs working pwr */
626         u32 hs_rate;            /* rate A/B to work in HS */
627         u32 desired_working_mode;
628 };
629
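/*
 * Negotiate the power mode: agree on HS vs. PWM from the device capabilities
 * (dev_max) and the vendor limits (qcom_param), then pick the minimum
 * supported lane counts and gear plus the HS rate, and store the result in
 * agreed_pwr.
 */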
630 static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
631                                       struct ufs_pa_layer_attr *dev_max,
632                                       struct ufs_pa_layer_attr *agreed_pwr)
633 {
634         int min_qcom_gear;
635         int min_dev_gear;
636         bool is_dev_sup_hs = false;
637         bool is_qcom_max_hs = false;
638
639         if (dev_max->pwr_rx == FAST_MODE)
640                 is_dev_sup_hs = true;
641
642         if (qcom_param->desired_working_mode == FAST) {
643                 is_qcom_max_hs = true;
644                 min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
645                                       qcom_param->hs_tx_gear);
646         } else {
647                 min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
648                                       qcom_param->pwm_tx_gear);
649         }
650
651         /*
652          * The device doesn't support HS but qcom_param->desired_working_mode
653          * is HS, thus the device and qcom_param don't agree.
654          */
655         if (!is_dev_sup_hs && is_qcom_max_hs) {
656                 pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
657                         __func__);
658                 return -ENOTSUPP;
659         } else if (is_dev_sup_hs && is_qcom_max_hs) {
660                 /*
661                  * Since the device supports HS, it supports FAST_MODE.
662                  * Since qcom_param->desired_working_mode is also HS,
663                  * the final decision (FAST/FASTAUTO) is made according
664                  * to qcom_param as it is the restricting factor.
665                  */
666                 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
667                                                 qcom_param->rx_pwr_hs;
668         } else {
669                 /*
670                  * Here qcom_param->desired_working_mode is PWM.
671                  * It doesn't matter whether the device supports HS or PWM;
672                  * in both cases qcom_param->desired_working_mode will
673                  * determine the mode.
674                  */
675                 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
676                                                 qcom_param->rx_pwr_pwm;
677         }
678
679         /*
680          * We would like TX to work with the minimum of the lane counts
681          * supported by the device and preferred by the vendor;
682          * the same decision is made for RX.
683          */
684         agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
685                                                 qcom_param->tx_lanes);
686         agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
687                                                 qcom_param->rx_lanes);
688
689         /* device maximum gear is the minimum between device rx and tx gears */
690         min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
691
692         /*
693          * If the device capabilities and the vendor pre-defined preferences
694          * are both HS or both PWM, then set the minimum gear as the chosen
695          * working gear.
696          * If one is PWM and one is HS, then the PWM side gets to decide the
697          * gear, as it is also the side that decided above which power mode
698          * the device will be configured to.
699          */
700         if ((is_dev_sup_hs && is_qcom_max_hs) ||
701             (!is_dev_sup_hs && !is_qcom_max_hs))
702                 agreed_pwr->gear_rx = agreed_pwr->gear_tx =
703                         min_t(u32, min_dev_gear, min_qcom_gear);
704         else if (!is_dev_sup_hs)
705                 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
706         else
707                 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
708
709         agreed_pwr->hs_rate = qcom_param->hs_rate;
710         return 0;
711 }
712
713 #ifdef CONFIG_MSM_BUS_SCALING
714 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
715                 const char *speed_mode)
716 {
717         struct device *dev = host->hba->dev;
718         struct device_node *np = dev->of_node;
719         int err;
720         const char *key = "qcom,bus-vector-names";
721
722         if (!speed_mode) {
723                 err = -EINVAL;
724                 goto out;
725         }
726
727         if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
728                 err = of_property_match_string(np, key, "MAX");
729         else
730                 err = of_property_match_string(np, key, speed_mode);
731
732 out:
733         if (err < 0)
734                 dev_err(dev, "%s: Invalid %s mode %d\n",
735                                 __func__, speed_mode, err);
736         return err;
737 }
738
739 static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
740 {
741         int gear = max_t(u32, p->gear_rx, p->gear_tx);
742         int lanes = max_t(u32, p->lane_rx, p->lane_tx);
743         int pwr;
744
745         /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
746         if (!gear)
747                 gear = 1;
748
749         if (!lanes)
750                 lanes = 1;
751
752         if (!p->pwr_rx && !p->pwr_tx) {
753                 pwr = SLOWAUTO_MODE;
754                 snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
755         } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
756                  p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
757                 pwr = FAST_MODE;
758                 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
759                          p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
760         } else {
761                 pwr = SLOW_MODE;
762                 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
763                          "PWM", gear, lanes);
764         }
765 }
766
767 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
768 {
769         int err = 0;
770
771         if (vote != host->bus_vote.curr_vote) {
772                 err = msm_bus_scale_client_update_request(
773                                 host->bus_vote.client_handle, vote);
774                 if (err) {
775                         dev_err(host->hba->dev,
776                                 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
777                                 __func__, host->bus_vote.client_handle,
778                                 vote, err);
779                         goto out;
780                 }
781
782                 host->bus_vote.curr_vote = vote;
783         }
784 out:
785         return err;
786 }
787
788 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
789 {
790         int vote;
791         int err = 0;
792         char mode[BUS_VECTOR_NAME_LEN];
793
794         ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
795
796         vote = ufs_qcom_get_bus_vote(host, mode);
797         if (vote >= 0)
798                 err = ufs_qcom_set_bus_vote(host, vote);
799         else
800                 err = vote;
801
802         if (err)
803                 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
804         else
805                 host->bus_vote.saved_vote = vote;
806         return err;
807 }
808
809 static ssize_t
810 show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
811                         char *buf)
812 {
813         struct ufs_hba *hba = dev_get_drvdata(dev);
814         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
815
816         return snprintf(buf, PAGE_SIZE, "%u\n",
817                         host->bus_vote.is_max_bw_needed);
818 }
819
820 static ssize_t
821 store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
822                 const char *buf, size_t count)
823 {
824         struct ufs_hba *hba = dev_get_drvdata(dev);
825         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
826         uint32_t value;
827
828         if (!kstrtou32(buf, 0, &value)) {
829                 host->bus_vote.is_max_bw_needed = !!value;
830                 ufs_qcom_update_bus_bw_vote(host);
831         }
832
833         return count;
834 }
835
836 static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
837 {
838         int err;
839         struct msm_bus_scale_pdata *bus_pdata;
840         struct device *dev = host->hba->dev;
841         struct platform_device *pdev = to_platform_device(dev);
842         struct device_node *np = dev->of_node;
843
844         bus_pdata = msm_bus_cl_get_pdata(pdev);
845         if (!bus_pdata) {
846                 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
847                 err = -ENODATA;
848                 goto out;
849         }
850
851         err = of_property_count_strings(np, "qcom,bus-vector-names");
852         if (err < 0 || err != bus_pdata->num_usecases) {
853                 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
854                                 __func__, err);
855                 goto out;
856         }
857
858         host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
859         if (!host->bus_vote.client_handle) {
860                 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
861                                 __func__);
862                 err = -EFAULT;
863                 goto out;
864         }
865
866         /* cache the vote index for minimum and maximum bandwidth */
867         host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
868         host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
869
870         host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
871         host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
872         sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
873         host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
874         host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
875         err = device_create_file(dev, &host->bus_vote.max_bus_bw);
876 out:
877         return err;
878 }
879 #else /* CONFIG_MSM_BUS_SCALING */
880 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
881 {
882         return 0;
883 }
884
885 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
886 {
887         return 0;
888 }
889
890 static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
891 {
892         return 0;
893 }
894 #endif /* CONFIG_MSM_BUS_SCALING */
895
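/*
 * Toggle the device reference clock enable bit in dev_ref_clk_ctrl_mmio,
 * honouring the 1us guard intervals around hibern8 enter/exit that are
 * described in the comments below.
 */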
896 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
897 {
898         if (host->dev_ref_clk_ctrl_mmio &&
899             (enable ^ host->is_dev_ref_clk_enabled)) {
900                 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
901
902                 if (enable)
903                         temp |= host->dev_ref_clk_en_mask;
904                 else
905                         temp &= ~host->dev_ref_clk_en_mask;
906
907                 /*
908                  * If we are here to disable this clock, it might be immediately
909                  * after entering hibern8, in which case we need to make sure
910                  * that the device ref_clk stays active for at least 1us after
911                  * the hibern8 enter.
912                  */
913                 if (!enable)
914                         udelay(1);
915
916                 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
917
918                 /* ensure that ref_clk is enabled/disabled before we return */
919                 wmb();
920
921                 /*
922                  * If we call hibern8 exit after this, we need to make sure that
923                  * device ref_clk is stable for at least 1us before the hibern8
924                  * exit command.
925                  */
926                 if (enable)
927                         udelay(1);
928
929                 host->is_dev_ref_clk_enabled = enable;
930         }
931 }
932
933 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
934                                 enum ufs_notify_change_status status,
935                                 struct ufs_pa_layer_attr *dev_max_params,
936                                 struct ufs_pa_layer_attr *dev_req_params)
937 {
938         u32 val;
939         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
940         struct phy *phy = host->generic_phy;
941         struct ufs_qcom_dev_params ufs_qcom_cap;
942         int ret = 0;
943         int res = 0;
944
945         if (!dev_req_params) {
946                 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
947                 ret = -EINVAL;
948                 goto out;
949         }
950
951         switch (status) {
952         case PRE_CHANGE:
953                 ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
954                 ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
955                 ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
956                 ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
957                 ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
958                 ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
959                 ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
960                 ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
961                 ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
962                 ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
963                 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
964                 ufs_qcom_cap.desired_working_mode =
965                                         UFS_QCOM_LIMIT_DESIRED_MODE;
966
967                 if (host->hw_ver.major == 0x1) {
968                         /*
969                          * HS-G3 operations may not reliably work on legacy QCOM
970                          * UFS host controller hardware even though capability
971                          * exchange during link startup phase may end up
972                          * negotiating maximum supported gear as G3.
973                          * Hence downgrade the maximum supported gear to HS-G2.
974                          */
975                         if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
976                                 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
977                         if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
978                                 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
979                 }
980
981                 ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
982                                                  dev_max_params,
983                                                  dev_req_params);
984                 if (ret) {
985                         pr_err("%s: failed to determine capabilities\n",
986                                         __func__);
987                         goto out;
988                 }
989
990                 /* enable the device ref clock before changing to HS mode */
991                 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
992                         ufshcd_is_hs_mode(dev_req_params))
993                         ufs_qcom_dev_ref_clk_ctrl(host, true);
994                 break;
995         case POST_CHANGE:
996                 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
997                                         dev_req_params->pwr_rx,
998                                         dev_req_params->hs_rate, false)) {
999                         dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
1000                                 __func__);
1001                         /*
1002                          * we return error code at the end of the routine,
1003                          * but continue to configure UFS_PHY_TX_LANE_ENABLE
1004                          * and bus voting as usual
1005                          */
1006                         ret = -EINVAL;
1007                 }
1008
1009                 val = ~(MAX_U32 << dev_req_params->lane_tx);
1010                 res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
1011                 if (res) {
1012                         dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
1013                                 __func__, res);
1014                         ret = res;
1015                 }
1016
1017                 /* cache the power mode parameters to use internally */
1018                 memcpy(&host->dev_req_params,
1019                                 dev_req_params, sizeof(*dev_req_params));
1020                 ufs_qcom_update_bus_bw_vote(host);
1021
1022                 /* disable the device ref clock if entered PWM mode */
1023                 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
1024                         !ufshcd_is_hs_mode(dev_req_params))
1025                         ufs_qcom_dev_ref_clk_ctrl(host, false);
1026                 break;
1027         default:
1028                 ret = -EINVAL;
1029                 break;
1030         }
1031 out:
1032         return ret;
1033 }
1034
1035 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
1036 {
1037         int err;
1038         u32 pa_vs_config_reg1;
1039
1040         err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1041                              &pa_vs_config_reg1);
1042         if (err)
1043                 goto out;
1044
1045         /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
1046         err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1047                             (pa_vs_config_reg1 | (1 << 12)));
1048
1049 out:
1050         return err;
1051 }
1052
1053 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
1054 {
1055         int err = 0;
1056
1057         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
1058                 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
1059
1060         return err;
1061 }
1062
1063 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
1064 {
1065         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1066
1067         if (host->hw_ver.major == 0x1)
1068                 return UFSHCI_VERSION_11;
1069         else
1070                 return UFSHCI_VERSION_20;
1071 }
1072
1073 /**
1074  * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
1075  * @hba: host controller instance
1076  *
1077  * The QCOM UFS host controller might have some non-standard behaviours
1078  * (quirks) compared to what is specified by the UFSHCI specification.
1079  * Advertise all such quirks to the standard UFS host controller driver so
1080  * that it takes them into account.
1081  */
1082 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1083 {
1084         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1085
1086         if (host->hw_ver.major == 0x01) {
1087                 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1088                             | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1089                             | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
1090
1091                 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
1092                         hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
1093
1094                 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
1095         }
1096
1097         if (host->hw_ver.major == 0x2) {
1098                 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
1099
1100                 if (!ufs_qcom_cap_qunipro(host))
1101                         /* Legacy UniPro mode still needs the following quirks */
1102                         hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1103                                 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
1104                                 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
1105         }
1106 }
1107
1108 static void ufs_qcom_set_caps(struct ufs_hba *hba)
1109 {
1110         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1111
1112         hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1113         hba->caps |= UFSHCD_CAP_CLK_SCALING;
1114         hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1115
1116         if (host->hw_ver.major >= 0x2) {
1117                 host->caps = UFS_QCOM_CAP_QUNIPRO |
1118                              UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
1119         }
1120 }
1121
1122 /**
1123  * ufs_qcom_setup_clocks - enable/disable clocks
1124  * @hba: host controller instance
1125  * @on: If true, enable clocks else disable them.
1126  *
1127  * Returns 0 on success, non-zero on failure.
1128  */
1129 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
1130 {
1131         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1132         int err;
1133         int vote = 0;
1134
1135         /*
1136          * In case ufs_qcom_init() is not yet done, simply ignore.
1137          * ufs_qcom_setup_clocks() will be called again from
1138          * ufs_qcom_init() once init is done.
1139          */
1140         if (!host)
1141                 return 0;
1142
1143         if (on) {
1144                 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
1145                 if (err)
1146                         goto out;
1147
1148                 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
1149                 if (err) {
1150                         dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
1151                                 __func__, err);
1152                         ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1153                         goto out;
1154                 }
1155                 /* enable the device ref clock for HS mode */
1156                 if (ufshcd_is_hs_mode(&hba->pwr_info))
1157                         ufs_qcom_dev_ref_clk_ctrl(host, true);
1158                 vote = host->bus_vote.saved_vote;
1159                 if (vote == host->bus_vote.min_bw_vote)
1160                         ufs_qcom_update_bus_bw_vote(host);
1161
1162         } else {
1163
1164                 /* M-PHY RMMI interface clocks can be turned off */
1165                 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1166                 if (!ufs_qcom_is_link_active(hba))
1167                         /* disable device ref_clk */
1168                         ufs_qcom_dev_ref_clk_ctrl(host, false);
1169
1170                 vote = host->bus_vote.min_bw_vote;
1171         }
1172
1173         err = ufs_qcom_set_bus_vote(host, vote);
1174         if (err)
1175                 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1176                                 __func__, err);
1177
1178 out:
1179         return err;
1180 }
1181
1182 #define ANDROID_BOOT_DEV_MAX    30
1183 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
1184
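/*
 * Capture the "androidboot.bootdevice=" kernel command line argument;
 * ufs_qcom_init() returns -ENODEV for controllers that do not match it.
 */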
1185 #ifndef MODULE
1186 static int __init get_android_boot_dev(char *str)
1187 {
1188         strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
1189         return 1;
1190 }
1191 __setup("androidboot.bootdevice=", get_android_boot_dev);
1192 #endif
1193
1194 /**
1195  * ufs_qcom_init - bind phy with controller
1196  * @hba: host controller instance
1197  *
1198  * Binds the PHY with the controller and powers up the PHY, enabling its
1199  * clocks and regulators.
1200  *
1201  * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
1202  * power-up failure, and zero on success.
1203  */
1204 static int ufs_qcom_init(struct ufs_hba *hba)
1205 {
1206         int err;
1207         struct device *dev = hba->dev;
1208         struct platform_device *pdev = to_platform_device(dev);
1209         struct ufs_qcom_host *host;
1210         struct resource *res;
1211
1212         if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
1213                 return -ENODEV;
1214
1215         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1216         if (!host) {
1217                 err = -ENOMEM;
1218                 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
1219                 goto out;
1220         }
1221
1222         /* Make a two-way bind between the qcom host and the hba */
1223         host->hba = hba;
1224         ufshcd_set_variant(hba, host);
1225
1226         /*
1227          * Voting/devoting the device ref_clk source is time consuming, hence
1228          * skip devoting it during aggressive clock gating. This clock
1229          * will still be gated off during runtime suspend.
1230          */
1231         host->generic_phy = devm_phy_get(dev, "ufsphy");
1232
1233         if (IS_ERR(host->generic_phy)) {
1234                 err = PTR_ERR(host->generic_phy);
1235                 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
1236                 goto out;
1237         }
1238
1239         err = ufs_qcom_bus_register(host);
1240         if (err)
1241                 goto out_host_free;
1242
1243         ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1244                 &host->hw_ver.minor, &host->hw_ver.step);
1245
1246         /*
1247          * For newer controllers, the device reference clock control bit has
1248          * moved inside the UFS controller register address space itself.
1249          */
1250         if (host->hw_ver.major >= 0x02) {
1251                 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1252                 host->dev_ref_clk_en_mask = BIT(26);
1253         } else {
1254                 /* "dev_ref_clk_ctrl_mem" is an optional resource */
1255                 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1256                 if (res) {
1257                         host->dev_ref_clk_ctrl_mmio =
1258                                         devm_ioremap_resource(dev, res);
1259                         if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
1260                                 dev_warn(dev,
1261                                         "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
1262                                         __func__,
1263                                         PTR_ERR(host->dev_ref_clk_ctrl_mmio));
1264                                 host->dev_ref_clk_ctrl_mmio = NULL;
1265                         }
1266                         host->dev_ref_clk_en_mask = BIT(5);
1267                 }
1268         }
1269
1270         /* update phy revision information before calling phy_init() */
1271         ufs_qcom_phy_save_controller_version(host->generic_phy,
1272                 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
1273
1274         phy_init(host->generic_phy);
1275         err = phy_power_on(host->generic_phy);
1276         if (err)
1277                 goto out_unregister_bus;
1278
1279         err = ufs_qcom_init_lane_clks(host);
1280         if (err)
1281                 goto out_disable_phy;
1282
1283         ufs_qcom_set_caps(hba);
1284         ufs_qcom_advertise_quirks(hba);
1285
1286         ufs_qcom_setup_clocks(hba, true);
1287
1288         if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1289                 ufs_qcom_hosts[hba->dev->id] = host;
1290
1291         host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
1292         ufs_qcom_get_default_testbus_cfg(host);
1293         err = ufs_qcom_testbus_config(host);
1294         if (err) {
1295                 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1296                                 __func__, err);
1297                 err = 0;
1298         }
1299
1300         goto out;
1301
1302 out_disable_phy:
1303         phy_power_off(host->generic_phy);
1304 out_unregister_bus:
1305         phy_exit(host->generic_phy);
1306 out_host_free:
1307         devm_kfree(dev, host);
1308         ufshcd_set_variant(hba, NULL);
1309 out:
1310         return err;
1311 }
1312
1313 static void ufs_qcom_exit(struct ufs_hba *hba)
1314 {
1315         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1316
1317         ufs_qcom_disable_lane_clks(host);
1318         phy_power_off(host->generic_phy);
1319 }
1320
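/*
 * Program the MAX_CORE_CLK_1US_CYCLES field of DME_VS_CORE_CLK_CTRL with
 * clk_cycles and clear the CORE_CLK_DIV_EN bit.
 */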
1321 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
1322                                                        u32 clk_cycles)
1323 {
1324         int err;
1325         u32 core_clk_ctrl_reg;
1326
1327         if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
1328                 return -EINVAL;
1329
1330         err = ufshcd_dme_get(hba,
1331                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1332                             &core_clk_ctrl_reg);
1333         if (err)
1334                 goto out;
1335
1336         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
1337         core_clk_ctrl_reg |= clk_cycles;
1338
1339         /* Clear CORE_CLK_DIV_EN */
1340         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1341
1342         err = ufshcd_dme_set(hba,
1343                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1344                             core_clk_ctrl_reg);
1345 out:
1346         return err;
1347 }
1348
1349 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1350 {
1351         /* nothing to do as of now */
1352         return 0;
1353 }
1354
1355 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1356 {
1357         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1358
1359         if (!ufs_qcom_cap_qunipro(host))
1360                 return 0;
1361
1362         /* set unipro core clock cycles to 150 and clear clock divider */
1363         return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
1364 }
1365
1366 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1367 {
1368         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1369         int err;
1370         u32 core_clk_ctrl_reg;
1371
1372         if (!ufs_qcom_cap_qunipro(host))
1373                 return 0;
1374
1375         err = ufshcd_dme_get(hba,
1376                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1377                             &core_clk_ctrl_reg);
1378
1379         /* make sure CORE_CLK_DIV_EN is cleared */
1380         if (!err &&
1381             (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1382                 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1383                 err = ufshcd_dme_set(hba,
1384                                     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1385                                     core_clk_ctrl_reg);
1386         }
1387
1388         return err;
1389 }
1390
1391 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1392 {
1393         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1394
1395         if (!ufs_qcom_cap_qunipro(host))
1396                 return 0;
1397
1398         /* set unipro core clock cycles to 75 and clear clock divider */
1399         return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
1400 }
1401
1402 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1403                 bool scale_up, enum ufs_notify_change_status status)
1404 {
1405         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1406         struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
1407         int err = 0;
1408
1409         if (status == PRE_CHANGE) {
1410                 if (scale_up)
1411                         err = ufs_qcom_clk_scale_up_pre_change(hba);
1412                 else
1413                         err = ufs_qcom_clk_scale_down_pre_change(hba);
1414         } else {
1415                 if (scale_up)
1416                         err = ufs_qcom_clk_scale_up_post_change(hba);
1417                 else
1418                         err = ufs_qcom_clk_scale_down_post_change(hba);
1419
1420                 if (err || !dev_req_params)
1421                         goto out;
1422
1423                 ufs_qcom_cfg_timers(hba,
1424                                     dev_req_params->gear_rx,
1425                                     dev_req_params->pwr_rx,
1426                                     dev_req_params->hs_rate,
1427                                     false);
1428                 ufs_qcom_update_bus_bw_vote(host);
1429         }
1430
1431 out:
1432         return err;
1433 }
1434
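/*
 * Dump the QCOM-specific debug register ranges through print_fn() when
 * UFS_QCOM_DBG_PRINT_REGS_EN is set in host->dbg_print_en.
 */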
1435 static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1436                 void *priv, void (*print_fn)(struct ufs_hba *hba,
1437                 int offset, int num_regs, char *str, void *priv))
1438 {
1439         u32 reg;
1440         struct ufs_qcom_host *host;
1441
1442         if (unlikely(!hba)) {
1443                 pr_err("%s: hba is NULL\n", __func__);
1444                 return;
1445         }
1446         if (unlikely(!print_fn)) {
1447                 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
1448                 return;
1449         }
1450
1451         host = ufshcd_get_variant(hba);
1452         if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
1453                 return;
1454
1455         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1456         print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
1457
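        /* the debug RAMs below are only readable while bit 17 is set */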
1458         reg = ufshcd_readl(hba, REG_UFS_CFG1);
1459         reg |= UFS_BIT(17);
1460         ufshcd_writel(hba, reg, REG_UFS_CFG1);
1461
1462         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1463         print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
1464
1465         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1466         print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
1467
1468         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1469         print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
1470
1471         ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1); /* clear bit 17 */
1472
1473         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1474         print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
1475
1476         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1477         print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
1478
1479         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1480         print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
1481
1482         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1483         print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
1484
1485         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1486         print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
1487
1488         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1489         print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
1490
1491         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1492         print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
1493 }
1494
1495 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1496 {
1497         if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
1498                 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1499         else
1500                 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
1501 }
1502
1503 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1504 {
1505         /* provide a legal default configuration */
1506         host->testbus.select_major = TSTBUS_UAWM;
1507         host->testbus.select_minor = 1;
1508 }
1509
1510 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1511 {
1512         if (host->testbus.select_major >= TSTBUS_MAX) {
1513                 dev_err(host->hba->dev,
1514                         "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
1515                         __func__, host->testbus.select_major);
1516                 return false;
1517         }
1518
1519         /*
1520          * Not performing check for each individual select_major
1521          * mappings of select_minor, since there is no harm in
1522          * configuring a non-existent select_minor
1523          */
1524         if (host->testbus.select_minor > 0x1F) {
1525                 dev_err(host->hba->dev,
1526                         "%s: 0x%05X is not a legal testbus option\n",
1527                         __func__, host->testbus.select_minor);
1528                 return false;
1529         }
1530
1531         return true;
1532 }
1533
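/*
 * ufs_qcom_testbus_config - route the selected debug signals to the test bus
 *
 * host->testbus.select_major picks the functional block and is written to
 * the TEST_BUS_SEL field of REG_UFS_CFG1; select_minor picks the signal
 * group inside that block and is written, under TEST_BUS_SUB_SEL_MASK, to
 * the matching UFS_TEST_BUS_CTRL_n (or UFS_UNIPRO_CFG) register.
 *
 * A caller - for instance a vendor debugfs hook, which is not part of this
 * file - would typically do something like:
 *
 *	host->testbus.select_major = TSTBUS_UNIPRO;
 *	host->testbus.select_minor = 1;
 *	ufs_qcom_testbus_config(host);
 *
 * after which the routed value shows up in the UFS_TEST_BUS register (see
 * ufs_qcom_testbus_read()).
 */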
1534 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1535 {
1536         int reg;
1537         int offset;
1538         u32 mask = TEST_BUS_SUB_SEL_MASK;
1539
1540         if (!host)
1541                 return -EINVAL;
1542
1543         if (!ufs_qcom_testbus_cfg_is_ok(host))
1544                 return -EPERM;
1545
1546         switch (host->testbus.select_major) {
1547         case TSTBUS_UAWM:
1548                 reg = UFS_TEST_BUS_CTRL_0;
1549                 offset = 24;
1550                 break;
1551         case TSTBUS_UARM:
1552                 reg = UFS_TEST_BUS_CTRL_0;
1553                 offset = 16;
1554                 break;
1555         case TSTBUS_TXUC:
1556                 reg = UFS_TEST_BUS_CTRL_0;
1557                 offset = 8;
1558                 break;
1559         case TSTBUS_RXUC:
1560                 reg = UFS_TEST_BUS_CTRL_0;
1561                 offset = 0;
1562                 break;
1563         case TSTBUS_DFC:
1564                 reg = UFS_TEST_BUS_CTRL_1;
1565                 offset = 24;
1566                 break;
1567         case TSTBUS_TRLUT:
1568                 reg = UFS_TEST_BUS_CTRL_1;
1569                 offset = 16;
1570                 break;
1571         case TSTBUS_TMRLUT:
1572                 reg = UFS_TEST_BUS_CTRL_1;
1573                 offset = 8;
1574                 break;
1575         case TSTBUS_OCSC:
1576                 reg = UFS_TEST_BUS_CTRL_1;
1577                 offset = 0;
1578                 break;
1579         case TSTBUS_WRAPPER:
1580                 reg = UFS_TEST_BUS_CTRL_2;
1581                 offset = 16;
1582                 break;
1583         case TSTBUS_COMBINED:
1584                 reg = UFS_TEST_BUS_CTRL_2;
1585                 offset = 8;
1586                 break;
1587         case TSTBUS_UTP_HCI:
1588                 reg = UFS_TEST_BUS_CTRL_2;
1589                 offset = 0;
1590                 break;
1591         case TSTBUS_UNIPRO:
1592                 reg = UFS_UNIPRO_CFG;
1593                 offset = 1;
1594                 break;
1595         /*
1596          * No need for a default case, since
1597          * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1598          * is legal
1599          */
1600         }
1601         mask <<= offset;
1602
1603         pm_runtime_get_sync(host->hba->dev);
1604         ufshcd_hold(host->hba, false);
1605         ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1606                     (u32)host->testbus.select_major << 19,
1607                     REG_UFS_CFG1);
1608         ufshcd_rmwl(host->hba, mask,
1609                     (u32)host->testbus.select_minor << offset,
1610                     reg);
1611         ufs_qcom_enable_test_bus(host);
1612         ufshcd_release(host->hba);
1613         pm_runtime_put_sync(host->hba->dev);
1614
1615         return 0;
1616 }
1617
1618 static void ufs_qcom_testbus_read(struct ufs_hba *hba)
1619 {
1620         ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
1621 }
1622
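/*
 * ufs_qcom_dump_dbg_regs - .dbg_register_dump hook of ufs_hba_qcom_vops
 *
 * Dumps the vendor specific HCI registers, the wrapper debug registers and
 * the currently selected test bus output whenever the core driver requests
 * a debug register dump.
 */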
1623 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1624 {
1625         ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
1626                         "HCI Vendor Specific Registers ");
1627
1628         ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
1629         ufs_qcom_testbus_read(hba);
1630 }
1631
1632 /**
1633  * ufs_hba_qcom_vops - UFS QCOM specific variant operations
1634  *
1635  * The variant operations configure the necessary controller and PHY
1636  * handshake during initialization.
1637  */
1638 static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1639         .name                   = "qcom",
1640         .init                   = ufs_qcom_init,
1641         .exit                   = ufs_qcom_exit,
1642         .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
1643         .clk_scale_notify       = ufs_qcom_clk_scale_notify,
1644         .setup_clocks           = ufs_qcom_setup_clocks,
1645         .hce_enable_notify      = ufs_qcom_hce_enable_notify,
1646         .link_startup_notify    = ufs_qcom_link_startup_notify,
1647         .pwr_change_notify      = ufs_qcom_pwr_change_notify,
1648         .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
1649         .suspend                = ufs_qcom_suspend,
1650         .resume                 = ufs_qcom_resume,
1651         .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
1652 };
1653
1654 /**
1655  * ufs_qcom_probe - probe routine of the driver
1656  * @pdev: pointer to platform device handle
1657  *
1658  * Return zero for success and non-zero for failure
1659  */
1660 static int ufs_qcom_probe(struct platform_device *pdev)
1661 {
1662         int err;
1663         struct device *dev = &pdev->dev;
1664
1665         /* Perform generic probe */
1666         err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1667         if (err)
1668                 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1669
1670         return err;
1671 }
1672
1673 /**
1674  * ufs_qcom_remove - set driver_data of the device to NULL
1675  * @pdev: pointer to platform device handle
1676  *
1677  * Always returns 0
1678  */
1679 static int ufs_qcom_remove(struct platform_device *pdev)
1680 {
1681         struct ufs_hba *hba = platform_get_drvdata(pdev);
1682
1683         pm_runtime_get_sync(&pdev->dev);
1684         ufshcd_remove(hba);
1685         return 0;
1686 }
1687
1688 static const struct of_device_id ufs_qcom_of_match[] = {
1689         { .compatible = "qcom,ufshc"},
1690         {},
1691 };
1692 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1693
1694 static const struct dev_pm_ops ufs_qcom_pm_ops = {
1695         .suspend        = ufshcd_pltfrm_suspend,
1696         .resume         = ufshcd_pltfrm_resume,
1697         .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
1698         .runtime_resume  = ufshcd_pltfrm_runtime_resume,
1699         .runtime_idle    = ufshcd_pltfrm_runtime_idle,
1700 };
1701
1702 static struct platform_driver ufs_qcom_pltform = {
1703         .probe  = ufs_qcom_probe,
1704         .remove = ufs_qcom_remove,
1705         .shutdown = ufshcd_pltfrm_shutdown,
1706         .driver = {
1707                 .name   = "ufshcd-qcom",
1708                 .pm     = &ufs_qcom_pm_ops,
1709                 .of_match_table = of_match_ptr(ufs_qcom_of_match),
1710         },
1711 };
1712 module_platform_driver(ufs_qcom_pltform);
1713
1714 MODULE_LICENSE("GPL v2");