GNU Linux-libre 6.8.7-gnu
drivers/ufs/host/ufshcd-pci.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

struct ufs_host {
        void (*late_init)(struct ufs_hba *hba);
};

enum intel_ufs_dsm_func_id {
        INTEL_DSM_FNS           =  0,
        INTEL_DSM_RESET         =  1,
};

struct intel_host {
        struct ufs_host ufs_host;
        u32             dsm_fns;
        u32             active_ltr;
        u32             idle_ltr;
        struct dentry   *debugfs_root;
        struct gpio_desc *reset_gpio;
};

static const guid_t intel_dsm_guid =
        GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
                  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

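/*
 * A DSM function may be called only if its bit is set in the bitmask that the
 * INTEL_DSM_FNS query returned, cached in host->dsm_fns.
 */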
static bool __intel_dsm_supported(struct intel_host *host,
                                  enum intel_ufs_dsm_func_id fn)
{
        return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
}

#define INTEL_DSM_SUPPORTED(host, name) \
        __intel_dsm_supported(host, INTEL_DSM_##name)

static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
                       unsigned int fn, u32 *result)
{
        union acpi_object *obj;
        int err = 0;
        size_t len;

        obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
                                      ACPI_TYPE_BUFFER);
        if (!obj)
                return -EOPNOTSUPP;

        if (obj->buffer.length < 1) {
                err = -EINVAL;
                goto out;
        }

        len = min_t(size_t, obj->buffer.length, 4);

        *result = 0;
        memcpy(result, obj->buffer.pointer, len);
out:
        ACPI_FREE(obj);

        return err;
}

static int intel_dsm(struct intel_host *intel_host, struct device *dev,
                     unsigned int fn, u32 *result)
{
        if (!__intel_dsm_supported(intel_host, fn))
                return -EOPNOTSUPP;

        return __intel_dsm(intel_host, dev, fn, result);
}

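/* Query the supported-function bitmask once at init and cache it. */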
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
        int err;

        err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
        dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

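/*
 * Crypto (ICE) can be turned on in REG_CONTROLLER_ENABLE only after the host
 * controller itself has been enabled, hence the POST_CHANGE check below.
 */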
static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status status)
{
        /* Cannot enable ICE until after HC enable */
        if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
                u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

                hce |= CRYPTO_GENERAL_ENABLE;
                ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
        }

        return 0;
}

static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
        u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
        u32 lcc_enable = 0;

        ufshcd_dme_get(hba, attr, &lcc_enable);
        if (lcc_enable)
                ufshcd_disable_host_tx_lcc(hba);

        return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
                                         enum ufs_notify_change_status status)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                err = ufs_intel_disable_lcc(hba);
                break;
        case POST_CHANGE:
                break;
        default:
                break;
        }

        return err;
}

static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
        struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
        int ret;

        pwr_info.lane_rx = lanes;
        pwr_info.lane_tx = lanes;
        ret = ufshcd_config_pwr_mode(hba, &pwr_info);
        if (ret)
                dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
                        __func__, lanes, ret);
        return ret;
}

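/*
 * LKF power mode changes: on PRE_CHANGE, force 2-lane operation for high-speed
 * modes and accept the device's maximum parameters; on POST_CHANGE, after a
 * successful switch to high speed, re-read the peer PA_GRANULARITY.
 */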
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                if (ufshcd_is_hs_mode(dev_max_params) &&
                    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
                        ufs_intel_set_lanes(hba, 2);
                memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
                break;
        case POST_CHANGE:
                if (ufshcd_is_hs_mode(dev_req_params)) {
                        u32 peer_granularity;

                        usleep_range(1000, 1250);
                        err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
                                                  &peer_granularity);
                }
                break;
        default:
                break;
        }

        return err;
}

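/*
 * If host and device report the same PA_GRANULARITY, program the peer's
 * PA_TACTIVATE to the host's PA_TACTIVATE plus 2.
 */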
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
        u32 granularity, peer_granularity;
        u32 pa_tactivate, peer_pa_tactivate;
        int ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
        if (ret)
                goto out;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
        if (ret)
                goto out;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
        if (ret)
                goto out;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
        if (ret)
                goto out;

        if (granularity == peer_granularity) {
                u32 new_peer_pa_tactivate = pa_tactivate + 2;

                ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
        }
out:
        return ret;
}

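/*
 * Intel host-specific MMIO registers holding the active and idle latency
 * tolerance (LTR) values that are programmed from the PM QoS latency request.
 */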
#define INTEL_ACTIVELTR         0x804
#define INTEL_IDLELTR           0x808

#define INTEL_LTR_REQ           BIT(15)
#define INTEL_LTR_SCALE_MASK    GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US     (2 << 10)
#define INTEL_LTR_SCALE_32US    (3 << 10)
#define INTEL_LTR_VALUE_MASK    GENMASK(9, 0)

static void intel_cache_ltr(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
        host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct intel_host *host = ufshcd_get_variant(hba);
        u32 ltr;

        pm_runtime_get_sync(dev);

        /*
         * Program the latency tolerance (LTR) according to what the PM QoS
         * layer has requested, or disable it if we were passed a negative
         * value or PM_QOS_LATENCY_ANY.
         */
        ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

        if (val == PM_QOS_LATENCY_ANY || val < 0) {
                ltr &= ~INTEL_LTR_REQ;
        } else {
                ltr |= INTEL_LTR_REQ;
                ltr &= ~INTEL_LTR_SCALE_MASK;
                ltr &= ~INTEL_LTR_VALUE_MASK;

                if (val > INTEL_LTR_VALUE_MASK) {
                        val >>= 5;
                        if (val > INTEL_LTR_VALUE_MASK)
                                val = INTEL_LTR_VALUE_MASK;
                        ltr |= INTEL_LTR_SCALE_32US | val;
                } else {
                        ltr |= INTEL_LTR_SCALE_1US | val;
                }
        }

        if (ltr == host->active_ltr)
                goto out;

        writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
        writel(ltr, hba->mmio_base + INTEL_IDLELTR);

        /* Cache the values into intel_host structure */
        intel_cache_ltr(hba);
out:
        pm_runtime_put(dev);
}

static void intel_ltr_expose(struct device *dev)
{
        dev->power.set_latency_tolerance = intel_ltr_set;
        dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
        dev_pm_qos_hide_latency_tolerance(dev);
        dev->power.set_latency_tolerance = NULL;
}

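/* Expose the cached active/idle LTR values read-only via debugfs. */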
static void intel_add_debugfs(struct ufs_hba *hba)
{
        struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
        struct intel_host *host = ufshcd_get_variant(hba);

        intel_cache_ltr(hba);

        host->debugfs_root = dir;
        debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
        debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        debugfs_remove_recursive(host->debugfs_root);
}

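/*
 * Device reset: use the ACPI DSM reset function when available, otherwise fall
 * back to pulsing the reset GPIO described by the platform.
 */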
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        if (INTEL_DSM_SUPPORTED(host, RESET)) {
                u32 result = 0;
                int err;

                err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
                if (!err && !result)
                        err = -EIO;
                if (err)
                        dev_err(hba->dev, "%s: DSM error %d result %u\n",
                                __func__, err, result);
                return err;
        }

        if (!host->reset_gpio)
                return -EOPNOTSUPP;

        gpiod_set_value_cansleep(host->reset_gpio, 1);
        usleep_range(10, 15);

        gpiod_set_value_cansleep(host->reset_gpio, 0);
        usleep_range(10, 15);

        return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
        /* GPIO in _DSD has active low setting */
        return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

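/*
 * Common init for all Intel controllers: allocate the variant data, probe the
 * available DSM functions, advertise DeepSleep only when a device reset method
 * (DSM or GPIO) exists, then expose LTR control and add the debugfs entries.
 */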
static int ufs_intel_common_init(struct ufs_hba *hba)
{
        struct intel_host *host;

        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
        ufshcd_set_variant(hba, host);
        intel_dsm_init(host, hba->dev);
        if (INTEL_DSM_SUPPORTED(host, RESET)) {
                if (hba->vops->device_reset)
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        } else {
                if (hba->vops->device_reset)
                        host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
                if (IS_ERR(host->reset_gpio)) {
                        dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
                                __func__, PTR_ERR(host->reset_gpio));
                        host->reset_gpio = NULL;
                }
                if (host->reset_gpio) {
                        gpiod_set_value_cansleep(host->reset_gpio, 0);
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
                }
        }
        intel_ltr_expose(hba->dev);
        intel_add_debugfs(hba);
        return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
        intel_remove_debugfs(hba);
        intel_ltr_hide(hba->dev);
}

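/*
 * On resume, exit hibern8 if the link was left in it; if that fails, mark the
 * link off so that a full reset and restore is performed.
 */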
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
        if (ufshcd_is_link_hibern8(hba)) {
                int ret = ufshcd_uic_hibern8_exit(hba);

                if (!ret) {
                        ufshcd_set_link_active(hba);
                } else {
                        dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                __func__, ret);
                        /*
                         * Force reset and restore. Any other actions can lead
                         * to an unrecoverable state.
                         */
                        ufshcd_set_link_off(hba);
                }
        }

        return 0;
}

static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        return ufs_intel_common_init(hba);
}

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
        /* LKF always needs a full reset, so set PM accordingly */
        if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
                hba->spm_lvl = UFS_PM_LVL_6;
                hba->rpm_lvl = UFS_PM_LVL_6;
        } else {
                hba->spm_lvl = UFS_PM_LVL_5;
                hba->rpm_lvl = UFS_PM_LVL_5;
        }
}

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
        struct ufs_host *ufs_host;
        int err;

        hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
        ufs_host = ufshcd_get_variant(hba);
        ufs_host->late_init = ufs_intel_lkf_late_init;
        return err;
}

static int ufs_intel_adl_init(struct ufs_hba *hba)
{
        hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_WB_EN;
        return ufs_intel_common_init(hba);
}

static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
        hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
        return ufs_intel_common_init(hba);
}

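/* Per-platform variant ops, referenced via driver_data in the PCI ID table. */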
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_ehl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_lkf_init,
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .pwr_change_notify      = ufs_intel_lkf_pwr_change_notify,
        .apply_dev_quirks       = ufs_intel_lkf_apply_dev_quirks,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_adl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_mtl_init,
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        /* Force a full reset and restore */
        ufshcd_set_link_off(hba);

        return ufshcd_system_resume(dev);
}
#endif

/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and the host memory
 *              space data structures
 * @pdev: pointer to the PCI device handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
        struct ufs_hba *hba = pci_get_drvdata(pdev);

        pm_runtime_forbid(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);
        ufshcd_remove(hba);
        ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ufs_host *ufs_host;
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        int err;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pcim_enable_device failed\n");
                return err;
        }

        pci_set_master(pdev);

        err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
        if (err < 0) {
                dev_err(&pdev->dev, "request and iomap failed\n");
                return err;
        }

        mmio_base = pcim_iomap_table(pdev)[0];

        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err) {
                dev_err(&pdev->dev, "Allocation failed\n");
                return err;
        }

        hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

        err = ufshcd_init(hba, mmio_base, pdev->irq);
        if (err) {
                dev_err(&pdev->dev, "Initialization failed\n");
                ufshcd_dealloc_host(hba);
                return err;
        }

        ufs_host = ufshcd_get_variant(hba);
        if (ufs_host && ufs_host->late_init)
                ufs_host->late_init(hba);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);

        return 0;
}

static const struct dev_pm_ops ufshcd_pci_pm_ops = {
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
        .suspend        = ufshcd_system_suspend,
        .resume         = ufshcd_system_resume,
        .freeze         = ufshcd_system_suspend,
        .thaw           = ufshcd_system_resume,
        .poweroff       = ufshcd_system_suspend,
        .restore        = ufshcd_pci_restore,
        .prepare        = ufshcd_suspend_prepare,
        .complete       = ufshcd_resume_complete,
#endif
};

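/*
 * For Intel controllers, driver_data points at the matching variant ops,
 * which ufshcd_pci_probe() assigns to hba->vops.
 */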
static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
        { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { }     /* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
        .name = UFSHCD,
        .id_table = ufshcd_pci_tbl,
        .probe = ufshcd_pci_probe,
        .remove = ufshcd_pci_remove,
        .driver = {
                .pm = &ufshcd_pci_pm_ops
        },
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");