GNU Linux-libre 4.4.289-gnu1
drivers/phy/phy-qcom-ufs.c
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME              32
#define VDDA_PHY_MIN_UV            1000000
#define VDDA_PHY_MAX_UV            1000000
#define VDDA_PLL_MIN_UV            1800000
#define VDDA_PLL_MAX_UV            1800000
#define VDDP_REF_CLK_MIN_UV        1200000
#define VDDP_REF_CLK_MAX_UV        1200000

static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
                                    const char *, bool);
static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
                                  const char *);
static int ufs_qcom_phy_base_init(struct platform_device *pdev,
                                  struct ufs_qcom_phy *phy_common);

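/**
 * ufs_qcom_phy_calibrate() - program PHY calibration tables
 * @ufs_qcom_phy: PHY to calibrate
 * @tbl_A: mandatory table of register/value pairs for rate A
 * @tbl_size_A: number of entries in @tbl_A
 * @tbl_B: optional table of rate B overrides
 * @tbl_size_B: number of entries in @tbl_B
 * @is_rate_B: if true, apply @tbl_B on top of @tbl_A
 *
 * Writes both tables with writel_relaxed() and orders the writes with a
 * final memory barrier. Returns 0 on success or -EINVAL if a required
 * table is missing.
 */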
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
                           struct ufs_qcom_phy_calibration *tbl_A,
                           int tbl_size_A,
                           struct ufs_qcom_phy_calibration *tbl_B,
                           int tbl_size_B, bool is_rate_B)
{
        int i;
        int ret = 0;

        if (!tbl_A) {
                dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        for (i = 0; i < tbl_size_A; i++)
                writel_relaxed(tbl_A[i].cfg_value,
                               ufs_qcom_phy->mmio + tbl_A[i].reg_offset);

        /*
         * In case we want to work in rate B, we need to override the
         * registers that were configured in the rate A table with the
         * corresponding values from the rate B table.
         */
        if (is_rate_B) {
                if (!tbl_B) {
                        dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL\n",
                                __func__);
                        ret = -EINVAL;
                        goto out;
                }

                for (i = 0; i < tbl_size_B; i++)
                        writel_relaxed(tbl_B[i].cfg_value,
                                ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
        }

        /* flush buffered writes */
        mb();

out:
        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
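
/*
 * Illustrative usage (a sketch, not code from this driver): a PHY-specific
 * driver would typically build its calibration tables with the
 * UFS_QCOM_PHY_CAL_ENTRY() helper from phy-qcom-ufs-i.h and hand them to
 * ufs_qcom_phy_calibrate() from its calibrate_phy() op. The register
 * offsets and values below are hypothetical placeholders:
 *
 *	static struct ufs_qcom_phy_calibration cal_rate_A[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(0x0dc, 0x01),
 *		UFS_QCOM_PHY_CAL_ENTRY(0x0e0, 0x07),
 *	};
 *
 *	err = ufs_qcom_phy_calibrate(phy, cal_rate_A, ARRAY_SIZE(cal_rate_A),
 *				     cal_rate_B, ARRAY_SIZE(cal_rate_B),
 *				     is_rate_B);
 */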

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
                                struct ufs_qcom_phy *common_cfg,
                                const struct phy_ops *ufs_qcom_phy_gen_ops,
                                struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
        int err;
        struct device *dev = &pdev->dev;
        struct phy *generic_phy = NULL;
        struct phy_provider *phy_provider;

        err = ufs_qcom_phy_base_init(pdev, common_cfg);
        if (err) {
                dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
                goto out;
        }

        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
        if (IS_ERR(phy_provider)) {
                err = PTR_ERR(phy_provider);
                dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
                goto out;
        }

        generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
        if (IS_ERR(generic_phy)) {
                err = PTR_ERR(generic_phy);
                dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
                generic_phy = NULL;
                goto out;
        }

        common_cfg->phy_spec_ops = phy_spec_ops;
        common_cfg->dev = dev;

out:
        return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);

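/*
 * Illustrative probe sequence (a sketch under assumed names, not code from
 * this file): a device-specific PHY driver calls the generic probe above
 * and then attaches its private structure as the phy's driver data, which
 * is what get_ufs_qcom_phy() below relies on:
 *
 *	static int ufs_qcom_phy_foo_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_qcom_phy_foo *phy;	// hypothetical type; its
 *						// first member must be
 *						// struct ufs_qcom_phy common_cfg
 *		struct phy *generic_phy;
 *
 *		phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
 *		if (!phy)
 *			return -ENOMEM;
 *
 *		generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
 *				&foo_phy_gen_ops, &foo_phy_ops);
 *		if (!generic_phy)
 *			return -ENODEV;
 *
 *		phy_set_drvdata(generic_phy, phy);
 *		return 0;
 *	}
 */
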
/*
 * This assumes that the driver data set on generic_phy points to a
 * device-specific PHY structure whose first member is an embedded
 * "struct ufs_qcom_phy common_cfg". Keeping that member first is
 * crucial for the cast below to be valid.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
        return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);

static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
                           struct ufs_qcom_phy *phy_common)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        int err = 0;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
        phy_common->mmio = devm_ioremap_resource(dev, res);
        if (IS_ERR((void const *)phy_common->mmio)) {
                err = PTR_ERR((void const *)phy_common->mmio);
                phy_common->mmio = NULL;
                dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
                        __func__, err);
                return err;
        }

        /* "dev_ref_clk_ctrl_mem" is an optional resource */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "dev_ref_clk_ctrl_mem");
        phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
        if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
                phy_common->dev_ref_clk_ctrl_mmio = NULL;

        return 0;
}

static int __ufs_qcom_phy_clk_get(struct phy *phy,
                         const char *name, struct clk **clk_out, bool err_print)
{
        struct clk *clk;
        int err = 0;
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
        struct device *dev = ufs_qcom_phy->dev;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk)) {
                err = PTR_ERR(clk);
                if (err_print)
                        dev_err(dev, "failed to get %s err %d\n", name, err);
        } else {
                *clk_out = clk;
        }

        return err;
}

static
int ufs_qcom_phy_clk_get(struct phy *phy,
                         const char *name, struct clk **clk_out)
{
        return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
}

int
ufs_qcom_phy_init_clks(struct phy *generic_phy,
                       struct ufs_qcom_phy *phy_common)
{
        int err;

        err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
                                   &phy_common->tx_iface_clk);
        if (err)
                goto out;

        err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
                                   &phy_common->rx_iface_clk);
        if (err)
                goto out;

        err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
                                   &phy_common->ref_clk_src);
        if (err)
                goto out;

        /*
         * "ref_clk_parent" is optional, so don't abort init if it's
         * not found.
         */
        __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
                                   &phy_common->ref_clk_parent, false);

        err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
                                   &phy_common->ref_clk);

out:
        return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);

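/*
 * Illustrative (hypothetical) device-tree snippet for the clocks consumed
 * above; the clock-names match the strings in this file, but the node
 * layout and clock providers are assumptions, not taken from the real
 * bindings of any particular SoC:
 *
 *	ufsphy: phy@fc597000 {
 *		clock-names = "ref_clk_src", "ref_clk_parent", "ref_clk",
 *			      "tx_iface_clk", "rx_iface_clk";
 *		clocks = <&clock_rpm clk_ln_bb_clk>, ...;   // per-SoC phandles
 *	};
 */
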
int
ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
                              struct ufs_qcom_phy *phy_common)
{
        int err;

        err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
                "vdda-pll");
        if (err)
                goto out;

        err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
                "vdda-phy");
        if (err)
                goto out;

        /* the "vddp-ref-clk" supply and its properties are optional */
        __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
                                 "vddp-ref-clk", true);
out:
        return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);

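/*
 * Illustrative (hypothetical) device-tree snippet for the regulator
 * properties parsed below; the supply and property names follow from the
 * code in this file, while the node layout, phandles and numeric values
 * are assumptions for illustration only:
 *
 *	ufsphy: phy@fc597000 {
 *		vdda-phy-supply = <&pm_l4>;
 *		vdda-pll-supply = <&pm_l12>;
 *		vdda-phy-max-microamp = <18380>;
 *		vdda-pll-max-microamp = <9440>;
 *	};
 */
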
static int __ufs_qcom_phy_init_vreg(struct phy *phy,
                struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
        int err = 0;
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
        struct device *dev = ufs_qcom_phy->dev;
        char prop_name[MAX_PROP_NAME];

        vreg->name = kstrdup(name, GFP_KERNEL);
        if (!vreg->name) {
                err = -ENOMEM;
                goto out;
        }

        vreg->reg = devm_regulator_get(dev, name);
        if (IS_ERR(vreg->reg)) {
                err = PTR_ERR(vreg->reg);
                vreg->reg = NULL;
                if (!optional)
                        dev_err(dev, "failed to get %s, %d\n", name, err);
                goto out;
        }

        if (dev->of_node) {
                snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
                err = of_property_read_u32(dev->of_node,
                                        prop_name, &vreg->max_uA);
                if (err && err != -EINVAL) {
                        dev_err(dev, "%s: failed to read %s\n",
                                        __func__, prop_name);
                        goto out;
                } else if (err == -EINVAL || !vreg->max_uA) {
                        if (regulator_count_voltages(vreg->reg) > 0) {
                                dev_err(dev, "%s: %s is mandatory\n",
                                                __func__, prop_name);
                                err = -EINVAL;
                                goto out;
                        }
                        err = 0;
                }
                snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
                vreg->is_always_on = of_property_read_bool(dev->of_node,
                                                           prop_name);
        }

        if (!strcmp(name, "vdda-pll")) {
                vreg->max_uV = VDDA_PLL_MAX_UV;
                vreg->min_uV = VDDA_PLL_MIN_UV;
        } else if (!strcmp(name, "vdda-phy")) {
                vreg->max_uV = VDDA_PHY_MAX_UV;
                vreg->min_uV = VDDA_PHY_MIN_UV;
        } else if (!strcmp(name, "vddp-ref-clk")) {
                vreg->max_uV = VDDP_REF_CLK_MAX_UV;
                vreg->min_uV = VDDP_REF_CLK_MIN_UV;
        }

out:
        if (err) {
                kfree(vreg->name);
                /* clear the pointer so a later kfree() in remove is a no-op */
                vreg->name = NULL;
        }
        return err;
}

static int ufs_qcom_phy_init_vreg(struct phy *phy,
                        struct ufs_qcom_phy_vreg *vreg, const char *name)
{
        return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
}

static
int ufs_qcom_phy_cfg_vreg(struct phy *phy,
                          struct ufs_qcom_phy_vreg *vreg, bool on)
{
        int ret = 0;
        struct regulator *reg;
        const char *name;
        int min_uV;
        int uA_load;
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
        struct device *dev = ufs_qcom_phy->dev;

        /* check the pointer before it is dereferenced */
        BUG_ON(!vreg);

        reg = vreg->reg;
        name = vreg->name;

        if (regulator_count_voltages(reg) > 0) {
                min_uV = on ? vreg->min_uV : 0;
                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
                if (ret) {
                        dev_err(dev, "%s: %s set voltage failed, err=%d\n",
                                        __func__, name, ret);
                        goto out;
                }
                uA_load = on ? vreg->max_uA : 0;
                ret = regulator_set_load(reg, uA_load);
                if (ret >= 0) {
                        /*
                         * regulator_set_load() returns the new regulator
                         * mode upon success.
                         */
                        ret = 0;
                } else {
                        dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
                                        __func__, name, uA_load, ret);
                        goto out;
                }
        }
out:
        return ret;
}

static
int ufs_qcom_phy_enable_vreg(struct phy *phy,
                             struct ufs_qcom_phy_vreg *vreg)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
        struct device *dev = ufs_qcom_phy->dev;
        int ret = 0;

        if (!vreg || vreg->enabled)
                goto out;

        ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
        if (ret) {
                dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
                        __func__, ret);
                goto out;
        }

        ret = regulator_enable(vreg->reg);
        if (ret) {
                dev_err(dev, "%s: enable failed, err=%d\n",
                                __func__, ret);
                goto out;
        }

        vreg->enabled = true;
out:
        return ret;
}

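/**
 * ufs_qcom_phy_enable_ref_clk() - ungate the PHY's reference clock chain
 * @generic_phy: the generic PHY whose UFS PHY reference clocks to enable
 *
 * The reference clock is daisy-chained from ref_clk_src through the
 * optional ref_clk_parent to ref_clk, so the clocks are enabled in that
 * order and unwound in reverse on failure.
 */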
int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
{
        int ret = 0;
        struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

        if (phy->is_ref_clk_enabled)
                goto out;

        /*
         * The reference clock is propagated in a daisy-chained manner from
         * the source to the PHY, so ungate it at each stage.
         */
        ret = clk_prepare_enable(phy->ref_clk_src);
        if (ret) {
                dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
                                __func__, ret);
                goto out;
        }

        /*
         * "ref_clk_parent" is an optional clock, so make sure the clk
         * reference is available before trying to enable it.
         */
        if (phy->ref_clk_parent) {
                ret = clk_prepare_enable(phy->ref_clk_parent);
                if (ret) {
                        dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
                                        __func__, ret);
                        goto out_disable_src;
                }
        }

        ret = clk_prepare_enable(phy->ref_clk);
        if (ret) {
                dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
                                __func__, ret);
                goto out_disable_parent;
        }

        phy->is_ref_clk_enabled = true;
        goto out;

out_disable_parent:
        if (phy->ref_clk_parent)
                clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
        clk_disable_unprepare(phy->ref_clk_src);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);

static
int ufs_qcom_phy_disable_vreg(struct phy *phy,
                              struct ufs_qcom_phy_vreg *vreg)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
        struct device *dev = ufs_qcom_phy->dev;
        int ret = 0;

        if (!vreg || !vreg->enabled || vreg->is_always_on)
                goto out;

        ret = regulator_disable(vreg->reg);
        if (!ret) {
                /* ignore errors on applying disable config */
                ufs_qcom_phy_cfg_vreg(phy, vreg, false);
                vreg->enabled = false;
        } else {
                dev_err(dev, "%s: %s disable failed, err=%d\n",
                                __func__, vreg->name, ret);
        }
out:
        return ret;
}

void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

        if (phy->is_ref_clk_enabled) {
                clk_disable_unprepare(phy->ref_clk);
                /*
                 * "ref_clk_parent" is an optional clock, so make sure the
                 * clk reference is available before trying to disable it.
                 */
                if (phy->ref_clk_parent)
                        clk_disable_unprepare(phy->ref_clk_parent);
                clk_disable_unprepare(phy->ref_clk_src);
                phy->is_ref_clk_enabled = false;
        }
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);

#define UFS_REF_CLK_EN  (1 << 5)

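/*
 * Gate or ungate the device reference clock by toggling UFS_REF_CLK_EN in
 * the optional "dev_ref_clk_ctrl_mem" register, honouring the 1 us timing
 * constraints around hibern8 entry/exit described in the comments below.
 */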
static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
        struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

        if (phy->dev_ref_clk_ctrl_mmio &&
            (enable ^ phy->is_dev_ref_clk_enabled)) {
                u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= UFS_REF_CLK_EN;
                else
                        temp &= ~UFS_REF_CLK_EN;

                /*
                 * If we are here to disable this clock immediately after
                 * entering hibern8, we need to make sure that the device
                 * ref_clk is active for at least 1 us after the hibern8
                 * enter.
                 */
                if (!enable)
                        udelay(1);

                writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
                /* ensure that ref_clk is enabled/disabled before we return */
                wmb();
                /*
                 * If we call hibern8 exit after this, we need to make sure
                 * that the device ref_clk is stable for at least 1 us before
                 * the hibern8 exit command.
                 */
                if (enable)
                        udelay(1);

                phy->is_dev_ref_clk_enabled = enable;
        }
}

void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);

void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);

/* Turn ON M-PHY RMMI interface clocks */
int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (phy->is_iface_clk_enabled)
                goto out;

        ret = clk_prepare_enable(phy->tx_iface_clk);
        if (ret) {
                dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
                                __func__, ret);
                goto out;
        }
        ret = clk_prepare_enable(phy->rx_iface_clk);
        if (ret) {
                clk_disable_unprepare(phy->tx_iface_clk);
                dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n",
                                __func__, ret);
                goto out;
        }
        phy->is_iface_clk_enabled = true;

out:
        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);

/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

        if (phy->is_iface_clk_enabled) {
                clk_disable_unprepare(phy->tx_iface_clk);
                clk_disable_unprepare(phy->rx_iface_clk);
                phy->is_iface_clk_enabled = false;
        }
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);

int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
                dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
                        __func__);
                ret = -ENOTSUPP;
        } else {
                ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
                dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
                        __func__);
                ret = -ENOTSUPP;
        } else {
                ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
                                                               tx_lanes);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
                                          u8 major, u16 minor, u16 step)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

        ufs_qcom_phy->host_ctrl_rev_major = major;
        ufs_qcom_phy->host_ctrl_rev_minor = minor;
        ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);

int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
                dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
                        __func__);
                ret = -ENOTSUPP;
        } else {
                ret = ufs_qcom_phy->phy_spec_ops->
                                calibrate_phy(ufs_qcom_phy, is_rate_B);
                if (ret)
                        dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
                                __func__, ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);

int ufs_qcom_phy_remove(struct phy *generic_phy,
                        struct ufs_qcom_phy *ufs_qcom_phy)
{
        phy_power_off(generic_phy);

        kfree(ufs_qcom_phy->vdda_pll.name);
        kfree(ufs_qcom_phy->vdda_phy.name);
        kfree(ufs_qcom_phy->vddp_ref_clk.name);

        return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);

int ufs_qcom_phy_exit(struct phy *generic_phy)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

        if (ufs_qcom_phy->is_powered_on)
                phy_power_off(generic_phy);

        return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);

int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

        if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
                dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
                        __func__);
                return -ENOTSUPP;
        }

        return ufs_qcom_phy->phy_spec_ops->
                        is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);

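/*
 * Power on the PHY in a fixed order: the vdda_phy rail first (the PHY's
 * power_control() op depends on it), then vdda_pll (which also feeds the
 * reference clock LDOs), then the reference clock chain, and finally the
 * optional vddp_ref_clk pad rail. Failures unwind in reverse order.
 */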
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
        struct device *dev = phy_common->dev;
        int err;

        err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
        if (err) {
                dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
                        __func__, err);
                goto out;
        }

        phy_common->phy_spec_ops->power_control(phy_common, true);

        /* vdda_pll also enables ref clock LDOs so enable it first */
        err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
        if (err) {
                dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
                        __func__, err);
                goto out_disable_phy;
        }

        err = ufs_qcom_phy_enable_ref_clk(generic_phy);
        if (err) {
                dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
                        __func__, err);
                goto out_disable_pll;
        }

        /* enable device PHY ref_clk pad rail */
        if (phy_common->vddp_ref_clk.reg) {
                err = ufs_qcom_phy_enable_vreg(generic_phy,
                                               &phy_common->vddp_ref_clk);
                if (err) {
                        dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
                                __func__, err);
                        goto out_disable_ref_clk;
                }
        }

        phy_common->is_powered_on = true;
        goto out;

out_disable_ref_clk:
        ufs_qcom_phy_disable_ref_clk(generic_phy);
out_disable_pll:
        ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
out_disable_phy:
        ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
out:
        return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

        phy_common->phy_spec_ops->power_control(phy_common, false);

        if (phy_common->vddp_ref_clk.reg)
                ufs_qcom_phy_disable_vreg(generic_phy,
                                          &phy_common->vddp_ref_clk);
        ufs_qcom_phy_disable_ref_clk(generic_phy);

        ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
        ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
        phy_common->is_powered_on = false;

        return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
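
/*
 * Typical call sequence (an illustrative sketch; the exact flow lives in
 * the ufs-qcom host controller driver, not in this file): after
 * phy_power_on(), the host brings the link up roughly as follows:
 *
 *	err = ufs_qcom_phy_calibrate_phy(generic_phy, is_rate_B);
 *	if (!err)
 *		err = ufs_qcom_phy_start_serdes(generic_phy);
 *	if (!err)
 *		err = ufs_qcom_phy_is_pcs_ready(generic_phy);
 */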