// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */
#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
struct ufs_host {
	void (*late_init)(struct ufs_hba *hba);
};

enum {
	INTEL_DSM_FNS		=  0,
	INTEL_DSM_RESET		=  1,
};

struct intel_host {
	struct ufs_host ufs_host;
	u32		dsm_fns;
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
	struct gpio_desc *reset_gpio;
};
static const guid_t intel_dsm_guid =
	GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
		  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
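
/*
 * Helpers for the Intel UFS ACPI _DSM. Per the standard _DSM convention,
 * function 0 returns a bitmask of the other supported function indices;
 * it is evaluated once at init and cached in intel_host->dsm_fns. The
 * result comes back as a buffer, of which at most 4 bytes are copied out.
 */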
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;
	size_t len;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
		err = -EINVAL;
		goto out;
	}

	len = min_t(size_t, obj->buffer.length, 4);

	*result = 0;
	memcpy(result, obj->buffer.pointer, len);
out:
	ACPI_FREE(obj);

	return err;
}
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
	int err;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}
static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status status)
{
	/* Cannot enable ICE until after HC enable */
	if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
		u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

		hce |= CRYPTO_GENERAL_ENABLE;
		ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
	}

	return 0;
}
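
/*
 * Before link startup, Line Control Commands (LCC) are disabled on the
 * host TX side if the DME attribute reports them enabled, presumably
 * because LCC is not supported on these controllers.
 */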
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}
static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}
static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
	int ret;

	pwr_info.lane_rx = lanes;
	pwr_info.lane_tx = lanes;
	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
			__func__, lanes, ret);
	return ret;
}
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		if (ufshcd_is_hs_mode(dev_max_params) &&
		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
			ufs_intel_set_lanes(hba, 2);
		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
		break;
	case POST_CHANGE:
		if (ufshcd_is_hs_mode(dev_req_params)) {
			u32 peer_granularity;

			usleep_range(1000, 1250);
			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
						  &peer_granularity);
		}
		break;
	default:
		break;
	}

	return err;
}
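
/*
 * LKF device quirk: when host and device report the same PA_GRANULARITY,
 * the peer's PA_TACTIVATE is bumped by 2 (in granularity units), apparently
 * to give the device a little extra time to activate its lanes.
 */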
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
	if (ret)
		goto out;

	if (granularity == peer_granularity) {
		u32 new_peer_pa_tactivate = pa_tactivate + 2;

		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
	}
out:
	return ret;
}
#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
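
/*
 * LTR register layout, as used below: bit 15 flags the latency value as a
 * requirement, bits 11:10 select the scale (1 us or 32 us units), and bits
 * 9:0 hold the value. Values above 1023 us fall back to the 32 us scale.
 */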
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}
static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program latency tolerance (LTR) according to what has been asked
	 * by the PM QoS layer, or disable it if we were passed a negative
	 * value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}
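
/*
 * Hooking dev->power.set_latency_tolerance and calling
 * dev_pm_qos_expose_latency_tolerance() lets user space drive
 * intel_ltr_set() through the standard pm_qos_latency_tolerance_us
 * sysfs attribute.
 */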
static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}
static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}
static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}
static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	if (host->dsm_fns & INTEL_DSM_RESET) {
		u32 result = 0;
		int err;

		err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
		if (!err && !result)
			err = -EIO;
		if (err)
			dev_err(hba->dev, "%s: DSM error %d result %u\n",
				__func__, err, result);
		return err;
	}

	if (!host->reset_gpio)
		return -EOPNOTSUPP;

	gpiod_set_value_cansleep(host->reset_gpio, 1);
	usleep_range(10, 15);

	gpiod_set_value_cansleep(host->reset_gpio, 0);
	usleep_range(10, 15);

	return 0;
}
static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
	/* GPIO in _DSD has active low setting */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}
static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_dsm_init(host, hba->dev);
	if (host->dsm_fns & INTEL_DSM_RESET) {
		if (hba->vops->device_reset)
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
	} else {
		if (hba->vops->device_reset)
			host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
		if (IS_ERR(host->reset_gpio)) {
			dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
				__func__, PTR_ERR(host->reset_gpio));
			host->reset_gpio = NULL;
		}
		if (host->reset_gpio) {
			gpiod_set_value_cansleep(host->reset_gpio, 0);
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
		}
	}
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}
static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}
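
/*
 * On resume, if the link was left in hibern8, exit hibern8 manually; if
 * that fails, the link state is forced off so that the core performs a
 * full reset and restore, since any other action could leave the
 * controller in an unrecoverable state.
 */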
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Force reset and restore. Any other actions can lead
			 * to an unrecoverable state.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}
static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
	/* LKF always needs a full reset, so set PM accordingly */
	if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
		hba->spm_lvl = UFS_PM_LVL_6;
		hba->rpm_lvl = UFS_PM_LVL_6;
	} else {
		hba->spm_lvl = UFS_PM_LVL_5;
		hba->rpm_lvl = UFS_PM_LVL_5;
	}
}
static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
	struct ufs_host *ufs_host;
	int err;

	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	err = ufs_intel_common_init(hba);
	ufs_host = ufshcd_get_variant(hba);
	ufs_host->late_init = ufs_intel_lkf_late_init;
	return err;
}
static int ufs_intel_adl_init(struct ufs_hba *hba)
{
	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}
static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
	hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
	return ufs_intel_common_init(hba);
}
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_common_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_ehl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_lkf_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.pwr_change_notify	= ufs_intel_lkf_pwr_change_notify,
	.apply_dev_quirks	= ufs_intel_lkf_apply_dev_quirks,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_adl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_mtl_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};
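
/*
 * After a hibernation image is restored, the link state left behind by the
 * boot kernel cannot be trusted, so force the link off and let
 * ufshcd_system_resume() do a full reset and restore.
 */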
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	/* Force a full reset and restore */
	ufshcd_set_link_off(hba);

	return ufshcd_system_resume(dev);
}
#endif
/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}
/**
 * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
 *		data structure memory
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}
/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_host *ufs_host;
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	ufs_host = ufshcd_get_variant(hba);
	if (ufs_host && ufs_host->late_init)
		ufs_host->late_init(hba);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_system_suspend,
	.resume		= ufshcd_system_resume,
	.freeze		= ufshcd_system_suspend,
	.thaw		= ufshcd_system_resume,
	.poweroff	= ufshcd_system_suspend,
	.restore	= ufshcd_pci_restore,
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
#endif
};
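
/*
 * For the Intel entries below, driver_data holds a pointer to the variant
 * ops for that controller; probe casts it back into hba->vops.
 */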
static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
static struct pci_driver ufshcd_pci_driver = {
	.name		= UFSHCD,
	.id_table	= ufshcd_pci_tbl,
	.probe		= ufshcd_pci_probe,
	.remove		= ufshcd_pci_remove,
	.shutdown	= ufshcd_pci_shutdown,
	.driver		= {
		.pm	= &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);