1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Universal Flash Storage Host controller driver Core
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 */
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/pm_opp.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/sched/clock.h>
26 #include <linux/iopoll.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_dbg.h>
29 #include <scsi/scsi_driver.h>
30 #include <scsi/scsi_eh.h>
31 #include "ufshcd-priv.h"
32 #include <ufs/ufs_quirks.h>
33 #include <ufs/unipro.h>
34 #include "ufs-sysfs.h"
35 #include "ufs-debugfs.h"
36 #include "ufs-fault-injection.h"
38 #include "ufshcd-crypto.h"
39 #include <asm/unaligned.h>
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/ufs.h>
44 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
45 UTP_TASK_REQ_COMPL |\
46 UFSHCD_ERROR_MASK)
48 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
49 UFSHCD_ERROR_MASK |\
50 MCQ_CQ_EVENT_STATUS)
53 /* UIC command timeout, unit: ms */
54 #define UIC_CMD_TIMEOUT 500
56 /* NOP OUT retries waiting for NOP IN response */
57 #define NOP_OUT_RETRIES 10
58 /* Timeout after 50 msecs if NOP OUT hangs without response */
59 #define NOP_OUT_TIMEOUT 50 /* msecs */
61 /* Query request retries */
62 #define QUERY_REQ_RETRIES 3
63 /* Query request timeout */
64 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
66 /* Advanced RPMB request timeout */
67 #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
69 /* Task management command timeout */
70 #define TM_CMD_TIMEOUT 100 /* msecs */
72 /* maximum number of retries for a general UIC command */
73 #define UFS_UIC_COMMAND_RETRIES 3
75 /* maximum number of link-startup retries */
76 #define DME_LINKSTARTUP_RETRIES 3
78 /* maximum number of reset retries before giving up */
79 #define MAX_HOST_RESET_RETRIES 5
81 /* Maximum number of error handler retries before giving up */
82 #define MAX_ERR_HANDLER_RETRIES 5
84 /* Expose the flag value from utp_upiu_query.value */
85 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
87 /* Interrupt aggregation default timeout, unit: 40us */
88 #define INT_AGGR_DEF_TO 0x02
90 /* default delay of autosuspend: 2000 ms */
91 #define RPM_AUTOSUSPEND_DELAY_MS 2000
93 /* Default delay of RPM device flush delayed work */
94 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
96 /* Default value of wait time before gating device ref clock */
97 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
99 /* Polling time to wait for fDeviceInit */
100 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
102 /* Default RTC update every 10 seconds */
103 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
105 /* UFSHC 4.0 compliant HC support this mode. */
106 static bool use_mcq_mode = true;
108 static bool is_mcq_supported(struct ufs_hba *hba)
109 {
110 return hba->mcq_sup && use_mcq_mode;
111 }
113 module_param(use_mcq_mode, bool, 0644);
114 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
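/*
 * A minimal usage sketch, not part of this file: with mode 0644 the
 * parameter is writable through sysfs at runtime. Assuming the core is
 * built as the "ufshcd_core" module, MCQ could be disabled with
 *
 *   echo 0 > /sys/module/ufshcd_core/parameters/use_mcq_mode
 *
 * is_mcq_supported() is consulted at probe time, so a new value only
 * affects hosts probed afterwards.
 */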
116 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
117 ({ \
118 int _ret; \
119 if (_on) \
120 _ret = ufshcd_enable_vreg(_dev, _vreg); \
121 else \
122 _ret = ufshcd_disable_vreg(_dev, _vreg); \
123 _ret; \
124 })
126 #define ufshcd_hex_dump(prefix_str, buf, len) do { \
127 size_t __len = (len); \
128 print_hex_dump(KERN_ERR, prefix_str, \
129 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
130 16, 4, buf, __len, false); \
131 } while (0)
133 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
134 const char *prefix)
135 {
136 u32 *regs;
137 size_t pos;
139 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
140 return -EINVAL;
142 regs = kzalloc(len, GFP_ATOMIC);
143 if (!regs)
144 return -ENOMEM;
146 for (pos = 0; pos < len; pos += 4) {
147 if (offset == 0 &&
148 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
149 pos <= REG_UIC_ERROR_CODE_DME)
150 continue;
151 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
152 }
154 ufshcd_hex_dump(prefix, regs, len);
155 kfree(regs);
157 return 0;
158 }
159 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
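/*
 * Typical call, as used by ufshcd_print_evt_hist() below: dump the whole
 * UFSHCI register space with a log prefix.
 *
 *   ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */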
161 enum {
162 UFSHCD_MAX_CHANNEL = 0,
163 UFSHCD_MAX_ID = 1,
164 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
165 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
166 };
168 static const char *const ufshcd_state_name[] = {
169 [UFSHCD_STATE_RESET] = "reset",
170 [UFSHCD_STATE_OPERATIONAL] = "operational",
171 [UFSHCD_STATE_ERROR] = "error",
172 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
173 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
174 };
176 /* UFSHCD error handling flags */
177 enum {
178 UFSHCD_EH_IN_PROGRESS = (1 << 0),
179 };
181 /* UFSHCD UIC layer error flags */
182 enum {
183 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
184 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
185 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
186 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
187 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
188 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
189 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
190 };
192 #define ufshcd_set_eh_in_progress(h) \
193 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
194 #define ufshcd_eh_in_progress(h) \
195 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
196 #define ufshcd_clear_eh_in_progress(h) \
197 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
199 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
200 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
201 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
202 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
203 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
204 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
205 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
206 /*
207 * For DeepSleep, the link is first put in hibern8 and then off.
208 * Leaving the link in hibern8 is not supported.
209 */
210 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
211 };
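/*
 * Reading the table above: each PM level pairs a device power mode with
 * a link state. For example, UFS_PM_LVL_3 keeps the device in SLEEP mode
 * with the link in hibern8, while UFS_PM_LVL_5 powers the device down
 * and turns the link off entirely.
 */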
213 static inline enum ufs_dev_pwr_mode
214 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
215 {
216 return ufs_pm_lvl_states[lvl].dev_state;
217 }
219 static inline enum uic_link_state
220 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
221 {
222 return ufs_pm_lvl_states[lvl].link_state;
223 }
225 static inline enum ufs_pm_level
226 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
227 enum uic_link_state link_state)
228 {
229 enum ufs_pm_level lvl;
231 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
232 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
233 (ufs_pm_lvl_states[lvl].link_state == link_state))
234 return lvl;
235 }
237 /* if no match found, return level 0 */
238 return UFS_PM_LVL_0;
239 }
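/*
 * A lookup sketch based on the table above:
 *
 *   lvl = ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *                                                   UIC_LINK_HIBERN8_STATE);
 *   // lvl == UFS_PM_LVL_3; any unlisted combination yields UFS_PM_LVL_0
 */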
241 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
242 {
243 return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
244 hba->active_uic_cmd || hba->uic_async_done);
245 }
247 static const struct ufs_dev_quirk ufs_fixups[] = {
248 /* UFS cards deviations table */
249 { .wmanufacturerid = UFS_VENDOR_MICRON,
250 .model = UFS_ANY_MODEL,
251 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
252 { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
253 .model = UFS_ANY_MODEL,
254 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
255 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
256 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
257 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
258 .model = UFS_ANY_MODEL,
259 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
260 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
261 .model = "hB8aL1" /*H28U62301AMR*/,
262 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
263 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
264 .model = UFS_ANY_MODEL,
265 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
266 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
267 .model = "THGLF2G9C8KBADG",
268 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
269 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
270 .model = "THGLF2G9D8KBADG",
271 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
272 {}
273 };
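/*
 * Roughly how the table is consumed: at probe time each row is compared
 * against the device descriptor, and a row's quirks are OR-ed into
 * hba->dev_quirks when the manufacturer ID matches and .model is either
 * UFS_ANY_MODEL or equal to the reported model string.
 */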
275 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
276 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
277 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
278 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
279 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
280 static void ufshcd_hba_exit(struct ufs_hba *hba);
281 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
282 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
283 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
284 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
285 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
286 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
287 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
289 static irqreturn_t ufshcd_intr(int irq, void *__hba);
290 static int ufshcd_change_power_mode(struct ufs_hba *hba,
291 struct ufs_pa_layer_attr *pwr_mode);
292 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
293 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
294 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
295 struct ufs_vreg *vreg);
296 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
298 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
299 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
301 void ufshcd_enable_irq(struct ufs_hba *hba)
303 if (!hba->is_irq_enabled) {
304 enable_irq(hba->irq);
305 hba->is_irq_enabled = true;
306 }
307 }
308 EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
310 void ufshcd_disable_irq(struct ufs_hba *hba)
312 if (hba->is_irq_enabled) {
313 disable_irq(hba->irq);
314 hba->is_irq_enabled = false;
315 }
316 }
317 EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
319 static void ufshcd_configure_wb(struct ufs_hba *hba)
321 if (!ufshcd_is_wb_allowed(hba))
322 return;
324 ufshcd_wb_toggle(hba, true);
326 ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
328 if (ufshcd_is_wb_buf_flush_allowed(hba))
329 ufshcd_wb_toggle_buf_flush(hba, true);
332 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
334 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
335 scsi_unblock_requests(hba->host);
338 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
340 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
341 scsi_block_requests(hba->host);
342 }
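/*
 * scsi_block_requests()/scsi_unblock_requests() are not reference
 * counted, so the atomic scsi_block_reqs_cnt makes these wrappers safe
 * to nest: only the 0 -> 1 transition blocks the host and only the
 * 1 -> 0 transition unblocks it.
 */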
344 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
345 enum ufs_trace_str_t str_t)
347 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
348 struct utp_upiu_header *header;
350 if (!trace_ufshcd_upiu_enabled())
351 return;
353 if (str_t == UFS_CMD_SEND)
354 header = &rq->header;
355 else
356 header = &hba->lrb[tag].ucd_rsp_ptr->header;
358 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
359 UFS_TSF_CDB);
360 }
362 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
363 enum ufs_trace_str_t str_t,
364 struct utp_upiu_req *rq_rsp)
366 if (!trace_ufshcd_upiu_enabled())
367 return;
369 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
370 &rq_rsp->qr, UFS_TSF_OSF);
373 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
374 enum ufs_trace_str_t str_t)
376 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
378 if (!trace_ufshcd_upiu_enabled())
379 return;
381 if (str_t == UFS_TM_SEND)
382 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
383 &descp->upiu_req.req_header,
384 &descp->upiu_req.input_param1,
385 UFS_TSF_TM_INPUT);
386 else
387 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
388 &descp->upiu_rsp.rsp_header,
389 &descp->upiu_rsp.output_param1,
390 UFS_TSF_TM_OUTPUT);
391 }
393 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
394 const struct uic_command *ucmd,
395 enum ufs_trace_str_t str_t)
396 {
397 u32 cmd;
399 if (!trace_ufshcd_uic_command_enabled())
400 return;
402 if (str_t == UFS_CMD_SEND)
403 cmd = ucmd->command;
404 else
405 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
407 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
408 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
409 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
410 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
413 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
414 enum ufs_trace_str_t str_t)
416 u64 lba = 0;
417 u8 opcode = 0, group_id = 0;
418 u32 intr, doorbell;
419 int hwq_id = -1;
421 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
422 struct scsi_cmnd *cmd = lrbp->cmd;
423 struct request *rq = scsi_cmd_to_rq(cmd);
424 int transfer_len = -1;
426 if (!cmd)
427 return;
429 /* trace UPIU also */
430 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
431 if (!trace_ufshcd_command_enabled())
434 opcode = cmd->cmnd[0];
436 if (opcode == READ_10 || opcode == WRITE_10) {
437 /*
438 * Currently we only fully trace read(10) and write(10) commands
439 */
440 transfer_len =
441 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
442 lba = scsi_get_lba(cmd);
443 if (opcode == WRITE_10)
444 group_id = lrbp->cmd->cmnd[6];
445 } else if (opcode == UNMAP) {
446 /*
447 * The number of Bytes to be unmapped beginning with the lba.
448 */
449 transfer_len = blk_rq_bytes(rq);
450 lba = scsi_get_lba(cmd);
453 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
455 if (is_mcq_enabled(hba)) {
456 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
458 hwq_id = hwq->id;
459 } else {
460 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
461 }
462 trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
463 transfer_len, intr, lba, opcode, group_id);
466 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
468 struct ufs_clk_info *clki;
469 struct list_head *head = &hba->clk_list_head;
471 if (list_empty(head))
472 return;
474 list_for_each_entry(clki, head, list) {
475 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
476 clki->max_freq)
477 dev_err(hba->dev, "clk: %s, rate: %u\n",
478 clki->name, clki->curr_freq);
479 }
480 }
482 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
483 const char *err_name)
484 {
485 int i;
486 bool found = false;
487 const struct ufs_event_hist *e;
489 if (id >= UFS_EVT_CNT)
490 return;
492 e = &hba->ufs_stats.event[id];
494 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
495 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
497 if (e->tstamp[p] == 0)
498 continue;
499 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
500 e->val[p], div_u64(e->tstamp[p], 1000));
501 found = true;
502 }
504 if (!found)
505 dev_err(hba->dev, "No record of %s\n", err_name);
506 else
507 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
508 }
510 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
512 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
514 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
515 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
516 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
517 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
518 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
519 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
521 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
522 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
523 "link_startup_fail");
524 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
525 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
527 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
528 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
529 "wlun suspend_fail");
530 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
531 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
532 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
534 ufshcd_vops_dbg_register_dump(hba);
538 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
540 const struct ufshcd_lrb *lrbp;
541 int prdt_length;
543 lrbp = &hba->lrb[tag];
545 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
546 tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
547 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
548 tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
550 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
551 tag, (u64)lrbp->utrd_dma_addr);
553 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
554 sizeof(struct utp_transfer_req_desc));
555 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
556 (u64)lrbp->ucd_req_dma_addr);
557 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
558 sizeof(struct utp_upiu_req));
559 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
560 (u64)lrbp->ucd_rsp_dma_addr);
561 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
562 sizeof(struct utp_upiu_rsp));
564 prdt_length = le16_to_cpu(
565 lrbp->utr_descriptor_ptr->prd_table_length);
566 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
567 prdt_length /= ufshcd_sg_entry_size(hba);
569 dev_err(hba->dev,
570 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
571 tag, prdt_length,
572 (u64)lrbp->ucd_prdt_dma_addr);
574 if (pr_prdt)
575 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
576 ufshcd_sg_entry_size(hba) * prdt_length);
577 }
579 static bool ufshcd_print_tr_iter(struct request *req, void *priv)
581 struct scsi_device *sdev = req->q->queuedata;
582 struct Scsi_Host *shost = sdev->host;
583 struct ufs_hba *hba = shost_priv(shost);
585 ufshcd_print_tr(hba, req->tag, *(bool *)priv);
587 return true;
588 }
591 * ufshcd_print_trs_all - print trs for all started requests.
592 * @hba: per-adapter instance.
593 * @pr_prdt: need to print prdt or not.
595 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
596 {
597 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
598 }
600 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
601 {
602 int tag;
604 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
605 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
607 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
608 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
612 static void ufshcd_print_host_state(struct ufs_hba *hba)
614 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
616 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
617 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
618 hba->outstanding_reqs, hba->outstanding_tasks);
619 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
620 hba->saved_err, hba->saved_uic_err);
621 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
622 hba->curr_dev_pwr_mode, hba->uic_link_state);
623 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
624 hba->pm_op_in_progress, hba->is_sys_suspended);
625 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
626 hba->auto_bkops_enabled, hba->host->host_self_blocked);
627 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
629 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
630 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
631 hba->ufs_stats.hibern8_exit_cnt);
632 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
633 div_u64(hba->ufs_stats.last_intr_ts, 1000),
634 hba->ufs_stats.last_intr_status);
635 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
636 hba->eh_flags, hba->req_abort_count);
637 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
638 hba->ufs_version, hba->capabilities, hba->caps);
639 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
642 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
643 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
645 ufshcd_print_clk_freqs(hba);
649 * ufshcd_print_pwr_info - print power params as saved in hba
651 * @hba: per-adapter instance
653 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
655 static const char * const names[] = {
665 /*
666 * Use dev_dbg here to avoid logging during runtime PM: user space may
667 * write log messages back to storage, causing runtime resume, which
668 * causes more messages, and so on in a never-ending cycle.
669 */
670 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
672 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
673 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
674 names[hba->pwr_info.pwr_rx],
675 names[hba->pwr_info.pwr_tx],
676 hba->pwr_info.hs_rate);
679 static void ufshcd_device_reset(struct ufs_hba *hba)
680 {
681 int err;
683 err = ufshcd_vops_device_reset(hba);
685 if (!err) {
686 ufshcd_set_ufs_dev_active(hba);
687 if (ufshcd_is_wb_allowed(hba)) {
688 hba->dev_info.wb_enabled = false;
689 hba->dev_info.wb_buf_flush_enabled = false;
690 }
691 if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
692 hba->dev_info.rtc_time_baseline = 0;
693 }
694 if (err != -EOPNOTSUPP)
695 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
696 }
698 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
699 {
700 if (!us)
701 return;
703 if (us < 10)
704 udelay(us);
705 else
706 usleep_range(us, us + tolerance);
707 }
708 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
711 * ufshcd_wait_for_register - wait for register value to change
712 * @hba: per-adapter interface
713 * @reg: mmio register offset
714 * @mask: mask to apply to the read register value
715 * @val: value to wait for
716 * @interval_us: polling interval in microseconds
717 * @timeout_ms: timeout in milliseconds
719 * Return: -ETIMEDOUT on error, zero on success.
721 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
722 u32 val, unsigned long interval_us,
723 unsigned long timeout_ms)
724 {
725 int err = 0;
726 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
728 /* ignore bits that we don't intend to wait on */
729 val = val & mask;
731 while ((ufshcd_readl(hba, reg) & mask) != val) {
732 usleep_range(interval_us, interval_us + 50);
733 if (time_after(jiffies, timeout)) {
734 if ((ufshcd_readl(hba, reg) & mask) != val)
735 err = -ETIMEDOUT;
736 break;
737 }
738 }
740 return err;
741 }
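/*
 * A usage sketch with illustrative values: wait for the bits in `mask`
 * to deassert in the doorbell register, polling every 1000 us for up to
 * 1000 ms. Passing ~mask as `val` works because the function masks the
 * expected value internally, so this waits for the masked bits to read
 * as zero.
 *
 *   err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                  mask, ~mask, 1000, 1000);
 */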
744 * ufshcd_get_intr_mask - Get the interrupt bit mask
745 * @hba: Pointer to adapter instance
747 * Return: interrupt bit mask per version
749 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
751 if (hba->ufs_version == ufshci_version(1, 0))
752 return INTERRUPT_MASK_ALL_VER_10;
753 if (hba->ufs_version <= ufshci_version(2, 0))
754 return INTERRUPT_MASK_ALL_VER_11;
756 return INTERRUPT_MASK_ALL_VER_21;
760 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
761 * @hba: Pointer to adapter instance
763 * Return: UFSHCI version supported by the controller
765 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
766 {
767 u32 ufshci_ver;
769 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
770 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
771 else
772 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
774 /*
775 * UFSHCI v1.x uses a different version scheme. In order to allow
776 * comparisons with the ufshci_version() helper, convert it to the
777 * scheme used by UFS 2.0 and later.
778 */
779 if (ufshci_ver & 0x00010000)
780 return ufshci_version(1, ufshci_ver & 0x00000100);
782 return ufshci_ver;
783 }
786 * ufshcd_is_device_present - Check if any device is connected to
787 * the host controller.
788 * @hba: pointer to adapter instance
790 * Return: true if device present, false if no device detected
792 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
794 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
798 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
799 * @lrbp: pointer to local command reference block
800 * @cqe: pointer to the completion queue entry
802 * This function is used to get the OCS field from UTRD
804 * Return: the OCS field in the UTRD.
806 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
807 struct cq_entry *cqe)
808 {
809 if (cqe)
810 return le32_to_cpu(cqe->status) & MASK_OCS;
812 return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
813 }
816 * ufshcd_utrl_clear() - Clear requests from the controller request list.
817 * @hba: per adapter instance
818 * @mask: mask with one bit set for each request to be cleared
820 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
822 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
823 mask = ~mask;
824 /*
825 * From the UFSHCI specification: "UTP Transfer Request List CLear
826 * Register (UTRLCLR): This field is bit significant. Each bit
827 * corresponds to a slot in the UTP Transfer Request List, where bit 0
828 * corresponds to request slot 0. A bit in this field is set to ‘0’
829 * by host software to indicate to the host controller that a transfer
830 * request slot is cleared. The host controller
831 * shall free up any resources associated to the request slot
832 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
833 * host software indicates no change to request slots by setting the
834 * associated bits in this field to ‘1’. Bits in this field shall only
835 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
836 */
837 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
838 }
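/*
 * Example of the inverted write above: to ask the controller to clear
 * request slot 5 only, a caller passes mask = BIT(5); the value written
 * is then ~BIT(5), i.e. '0' in bit 5 ("clear this slot") and '1' in all
 * other bits ("no change"), matching the specification text quoted
 * above.
 */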
841 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
842 * @hba: per adapter instance
843 * @pos: position of the bit to be cleared
845 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
847 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
848 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
849 else
850 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
851 }
854 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
855 * @reg: Register value of host controller status
857 * Return: 0 on success; a positive value if failed.
859 static inline int ufshcd_get_lists_status(u32 reg)
861 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
865 * ufshcd_get_uic_cmd_result - Get the UIC command result
866 * @hba: Pointer to adapter instance
868 * This function gets the result of UIC command completion
870 * Return: 0 on success; non-zero value on error.
872 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
874 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
875 MASK_UIC_COMMAND_RESULT;
879 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
880 * @hba: Pointer to adapter instance
882 * This function reads UIC command argument 3.
884 * Return: the value of UIC command argument 3.
886 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
888 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
892 * ufshcd_get_req_rsp - returns the TR response transaction type
893 * @ucd_rsp_ptr: pointer to response UPIU
895 * Return: UPIU type of transaction.
896 */
897 static inline enum upiu_response_transaction
898 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
900 return ucd_rsp_ptr->header.transaction_code;
904 * ufshcd_is_exception_event - Check if the device raised an exception event
905 * @ucd_rsp_ptr: pointer to response UPIU
907 * The function checks if the device raised an exception event indicated in
908 * the Device Information field of response UPIU.
910 * Return: true if exception is raised, false otherwise.
912 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
914 return ucd_rsp_ptr->header.device_information & 1;
918 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
919 * @hba: per adapter instance
922 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
924 ufshcd_writel(hba, INT_AGGR_ENABLE |
925 INT_AGGR_COUNTER_AND_TIMER_RESET,
926 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
930 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
931 * @hba: per adapter instance
932 * @cnt: Interrupt aggregation counter threshold
933 * @tmout: Interrupt aggregation timeout value
936 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
938 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
939 INT_AGGR_COUNTER_THLD_VAL(cnt) |
940 INT_AGGR_TIMEOUT_VAL(tmout),
941 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
945 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
946 * @hba: per adapter instance
948 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
950 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
954 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
955 * When the run-stop registers are set to 1, the host controller is
956 * allowed to process requests from the corresponding request lists.
957 * @hba: per adapter instance
959 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
961 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
962 REG_UTP_TASK_REQ_LIST_RUN_STOP);
963 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
964 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
968 * ufshcd_hba_start - Start controller initialization sequence
969 * @hba: per adapter instance
971 static inline void ufshcd_hba_start(struct ufs_hba *hba)
973 u32 val = CONTROLLER_ENABLE;
975 if (ufshcd_crypto_enable(hba))
976 val |= CRYPTO_GENERAL_ENABLE;
978 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
982 * ufshcd_is_hba_active - Get controller state
983 * @hba: per adapter instance
985 * Return: true if and only if the controller is active.
987 bool ufshcd_is_hba_active(struct ufs_hba *hba)
989 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
991 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
993 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
995 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
996 if (hba->ufs_version <= ufshci_version(1, 1))
997 return UFS_UNIPRO_VER_1_41;
999 return UFS_UNIPRO_VER_1_6;
1001 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1003 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1006 * If both host and device support UniPro ver1.6 or later, PA layer
1007 * parameters tuning happens during link startup itself.
1009 * We can manually tune PA layer parameters if either host or device
1010 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1011 * logic simple, we will only do manual tuning if local unipro version
1012 * doesn't support ver1.6 or later.
1014 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
1018 * ufshcd_pm_qos_init - initialize PM QoS request
1019 * @hba: per adapter instance
1021 void ufshcd_pm_qos_init(struct ufs_hba *hba)
1024 if (hba->pm_qos_enabled)
1025 return;
1027 cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);
1029 if (cpu_latency_qos_request_active(&hba->pm_qos_req))
1030 hba->pm_qos_enabled = true;
1034 * ufshcd_pm_qos_exit - remove request from PM QoS
1035 * @hba: per adapter instance
1037 void ufshcd_pm_qos_exit(struct ufs_hba *hba)
1039 if (!hba->pm_qos_enabled)
1040 return;
1042 cpu_latency_qos_remove_request(&hba->pm_qos_req);
1043 hba->pm_qos_enabled = false;
1047 * ufshcd_pm_qos_update - update PM QoS request
1048 * @hba: per adapter instance
1049 * @on: If True, vote for perf PM QoS mode otherwise power save mode
1051 static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
1053 if (!hba->pm_qos_enabled)
1054 return;
1056 cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
1057 }
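/*
 * In other words: an "on" vote requests zero CPU wakeup latency for
 * maximum performance, while "off" restores PM_QOS_DEFAULT_VALUE and
 * lets CPUs enter deeper idle states again.
 */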
1060 * ufshcd_set_clk_freq - set UFS controller clock frequencies
1061 * @hba: per adapter instance
1062 * @scale_up: If True, set max possible frequency otherwise set low frequency
1064 * Return: 0 if successful; < 0 upon failure.
1066 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1069 struct ufs_clk_info *clki;
1070 struct list_head *head = &hba->clk_list_head;
1072 if (list_empty(head))
1075 list_for_each_entry(clki, head, list) {
1076 if (!IS_ERR_OR_NULL(clki->clk)) {
1077 if (scale_up && clki->max_freq) {
1078 if (clki->curr_freq == clki->max_freq)
1081 ret = clk_set_rate(clki->clk, clki->max_freq);
1083 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1084 __func__, clki->name,
1085 clki->max_freq, ret);
1088 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1089 "scaled up", clki->name,
1093 clki->curr_freq = clki->max_freq;
1095 } else if (!scale_up && clki->min_freq) {
1096 if (clki->curr_freq == clki->min_freq)
1099 ret = clk_set_rate(clki->clk, clki->min_freq);
1101 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1102 __func__, clki->name,
1103 clki->min_freq, ret);
1106 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1107 "scaled down", clki->name,
1110 clki->curr_freq = clki->min_freq;
1113 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1114 clki->name, clk_get_rate(clki->clk));
1121 int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
1122 struct dev_pm_opp *opp, void *data,
1123 bool scaling_down)
1124 {
1125 struct ufs_hba *hba = dev_get_drvdata(dev);
1126 struct list_head *head = &hba->clk_list_head;
1127 struct ufs_clk_info *clki;
1128 unsigned long freq;
1129 u8 idx = 0;
1130 int ret;
1132 list_for_each_entry(clki, head, list) {
1133 if (!IS_ERR_OR_NULL(clki->clk)) {
1134 freq = dev_pm_opp_get_freq_indexed(opp, idx++);
1136 /* Do not set rate for clocks having frequency as 0 */
1140 ret = clk_set_rate(clki->clk, freq);
1142 dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
1143 __func__, clki->name, freq, ret);
1147 trace_ufshcd_clk_scaling(dev_name(dev),
1148 (scaling_down ? "scaled down" : "scaled up"),
1149 clki->name, hba->clk_scaling.target_freq, freq);
1155 EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
1157 static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
1158 {
1159 struct dev_pm_opp *opp;
1160 int ret;
1162 opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
1163 &freq, 0);
1164 if (IS_ERR(opp))
1165 return PTR_ERR(opp);
1167 ret = dev_pm_opp_set_opp(hba->dev, opp);
1168 dev_pm_opp_put(opp);
1170 return ret;
1171 }
1174 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1175 * @hba: per adapter instance
1176 * @freq: frequency to scale
1177 * @scale_up: True if scaling up and false if scaling down
1179 * Return: 0 if successful; < 0 upon failure.
1181 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
1185 ktime_t start = ktime_get();
1187 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1191 if (hba->use_pm_opp)
1192 ret = ufshcd_opp_set_rate(hba, freq);
1194 ret = ufshcd_set_clk_freq(hba, scale_up);
1198 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1200 if (hba->use_pm_opp)
1201 ufshcd_opp_set_rate(hba,
1202 hba->devfreq->previous_freq);
1204 ufshcd_set_clk_freq(hba, !scale_up);
1208 ufshcd_pm_qos_update(hba, scale_up);
1211 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1212 (scale_up ? "up" : "down"),
1213 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1218 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1219 * @hba: per adapter instance
1220 * @freq: frequency to scale
1221 * @scale_up: True if scaling up and false if scaling down
1223 * Return: true if scaling is required, false otherwise.
1225 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1226 unsigned long freq, bool scale_up)
1228 struct ufs_clk_info *clki;
1229 struct list_head *head = &hba->clk_list_head;
1231 if (list_empty(head))
1234 if (hba->use_pm_opp)
1235 return freq != hba->clk_scaling.target_freq;
1237 list_for_each_entry(clki, head, list) {
1238 if (!IS_ERR_OR_NULL(clki->clk)) {
1239 if (scale_up && clki->max_freq) {
1240 if (clki->curr_freq == clki->max_freq)
1243 } else if (!scale_up && clki->min_freq) {
1244 if (clki->curr_freq == clki->min_freq)
1255 * Determine the number of pending commands by counting the bits in the SCSI
1256 * device budget maps. This approach has been selected because a bit is set in
1257 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1258 * flag. The host_self_blocked flag can be modified by calling
1259 * scsi_block_requests() or scsi_unblock_requests().
1261 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1262 {
1263 const struct scsi_device *sdev;
1264 u32 pending = 0;
1266 lockdep_assert_held(hba->host->host_lock);
1267 __shost_for_each_device(sdev, hba->host)
1268 pending += sbitmap_weight(&sdev->budget_map);
1270 return pending;
1271 }
1274 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1277 * Return: 0 upon success; -EBUSY upon timeout.
1279 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1280 u64 wait_timeout_us)
1282 unsigned long flags;
1283 u32 tm_doorbell;
1284 u32 tr_pending;
1285 int ret = 0;
1286 bool timeout = false, do_last_check = false;
1287 ktime_t start;
1289 ufshcd_hold(hba);
1290 spin_lock_irqsave(hba->host->host_lock, flags);
1292 * Wait for all the outstanding tasks/transfer requests.
1293 * Verify by checking the doorbell registers are clear.
1295 start = ktime_get();
1297 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1298 ret = -EBUSY;
1299 goto out;
1300 }
1302 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1303 tr_pending = ufshcd_pending_cmds(hba);
1304 if (!tm_doorbell && !tr_pending) {
1305 timeout = false;
1306 break;
1307 } else if (do_last_check) {
1308 break;
1309 }
1311 spin_unlock_irqrestore(hba->host->host_lock, flags);
1312 io_schedule_timeout(msecs_to_jiffies(20));
1313 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1314 wait_timeout_us) {
1315 timeout = true;
1316 /*
1317 * We might have scheduled out for a long time so make
1318 * sure to check if doorbells are cleared by this time
1319 * or not.
1320 */
1321 do_last_check = true;
1322 }
1323 spin_lock_irqsave(hba->host->host_lock, flags);
1324 } while (tm_doorbell || tr_pending);
1326 if (timeout) {
1327 dev_err(hba->dev,
1328 "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1329 __func__, tm_doorbell, tr_pending);
1330 ret = -EBUSY;
1331 }
1332 out:
1333 spin_unlock_irqrestore(hba->host->host_lock, flags);
1334 ufshcd_release(hba);
1335 return ret;
1336 }
1339 * ufshcd_scale_gear - scale up/down UFS gear
1340 * @hba: per adapter instance
1341 * @scale_up: True for scaling up gear and false for scaling down
1343 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
1344 * non-zero for any other errors.
1346 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1347 {
1348 int ret = 0;
1349 struct ufs_pa_layer_attr new_pwr_info;
1351 if (scale_up) {
1352 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
1353 sizeof(struct ufs_pa_layer_attr));
1354 } else {
1355 memcpy(&new_pwr_info, &hba->pwr_info,
1356 sizeof(struct ufs_pa_layer_attr));
1358 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1359 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1360 /* save the current power mode */
1361 memcpy(&hba->clk_scaling.saved_pwr_info,
1362 &hba->pwr_info,
1363 sizeof(struct ufs_pa_layer_attr));
1365 /* scale down gear */
1366 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1367 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1368 }
1369 }
1371 /* check if the power mode needs to be changed */
1372 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1374 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1375 __func__, ret,
1376 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1377 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1378 }
1380 return ret;
1381 }
1383 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1386 * Return: 0 upon success; -EBUSY upon timeout.
1388 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
1389 {
1390 int ret = 0;
1391 /*
1392 * make sure that there are no outstanding requests when
1393 * clock scaling is in progress
1395 ufshcd_scsi_block_requests(hba);
1396 mutex_lock(&hba->wb_mutex);
1397 down_write(&hba->clk_scaling_lock);
1399 if (!hba->clk_scaling.is_allowed ||
1400 ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
1401 ret = -EBUSY;
1402 up_write(&hba->clk_scaling_lock);
1403 mutex_unlock(&hba->wb_mutex);
1404 ufshcd_scsi_unblock_requests(hba);
1405 goto out;
1406 }
1408 /* let's not get into low power until clock scaling is completed */
1409 ufshcd_hold(hba);
1411 out:
1412 return ret;
1413 }
1415 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
1417 up_write(&hba->clk_scaling_lock);
1419 /* Enable Write Booster if we have scaled up else disable it */
1420 if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
1421 ufshcd_wb_toggle(hba, scale_up);
1423 mutex_unlock(&hba->wb_mutex);
1425 ufshcd_scsi_unblock_requests(hba);
1426 ufshcd_release(hba);
1430 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1431 * @hba: per adapter instance
1432 * @freq: frequency to scale
1433 * @scale_up: True for scaling up and false for scaling down
1435 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
1436 * for any other errors.
1438 static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
1439 bool scale_up)
1440 {
1441 int ret = 0;
1443 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
1444 if (ret)
1445 return ret;
1447 /* scale down the gear before scaling down clocks */
1448 if (!scale_up) {
1449 ret = ufshcd_scale_gear(hba, false);
1450 if (ret)
1451 goto out_unprepare;
1452 }
1454 ret = ufshcd_scale_clks(hba, freq, scale_up);
1455 if (ret) {
1456 if (!scale_up)
1457 ufshcd_scale_gear(hba, true);
1458 goto out_unprepare;
1459 }
1461 /* scale up the gear after scaling up clocks */
1462 if (scale_up) {
1463 ret = ufshcd_scale_gear(hba, true);
1464 if (ret) {
1465 ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
1466 false);
1467 goto out_unprepare;
1468 }
1469 }
1471 out_unprepare:
1472 ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
1473 return ret;
1474 }
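/*
 * Ordering summary for ufshcd_devfreq_scale(): quiesce I/O (prepare),
 * scale the gear down before lowering clocks, scale the clocks, then
 * scale the gear up only after raising clocks; on failure the step that
 * already succeeded is rolled back before unprepare runs.
 */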
1476 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1478 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1479 clk_scaling.suspend_work);
1480 unsigned long irq_flags;
1482 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1483 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1484 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1487 hba->clk_scaling.is_suspended = true;
1488 hba->clk_scaling.window_start_t = 0;
1489 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1491 devfreq_suspend_device(hba->devfreq);
1494 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1496 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1497 clk_scaling.resume_work);
1498 unsigned long irq_flags;
1500 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1501 if (!hba->clk_scaling.is_suspended) {
1502 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1505 hba->clk_scaling.is_suspended = false;
1506 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1508 devfreq_resume_device(hba->devfreq);
1511 static int ufshcd_devfreq_target(struct device *dev,
1512 unsigned long *freq, u32 flags)
1515 struct ufs_hba *hba = dev_get_drvdata(dev);
1517 bool scale_up = false, sched_clk_scaling_suspend_work = false;
1518 struct list_head *clk_list = &hba->clk_list_head;
1519 struct ufs_clk_info *clki;
1520 unsigned long irq_flags;
1522 if (!ufshcd_is_clkscaling_supported(hba))
1525 if (hba->use_pm_opp) {
1526 struct dev_pm_opp *opp;
1528 /* Get the recommended frequency from OPP framework */
1529 opp = devfreq_recommended_opp(dev, freq, flags);
1531 return PTR_ERR(opp);
1533 dev_pm_opp_put(opp);
1535 /* Override with the closest supported frequency */
1536 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
1538 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1541 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1542 if (ufshcd_eh_in_progress(hba)) {
1543 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1547 /* Skip scaling clock when clock scaling is suspended */
1548 if (hba->clk_scaling.is_suspended) {
1549 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1550 dev_warn(hba->dev, "clock scaling is suspended, skip");
1554 if (!hba->clk_scaling.active_reqs)
1555 sched_clk_scaling_suspend_work = true;
1557 if (list_empty(clk_list)) {
1558 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1562 /* Decide based on the target or rounded-off frequency and update */
1563 if (hba->use_pm_opp)
1564 scale_up = *freq > hba->clk_scaling.target_freq;
1566 scale_up = *freq == clki->max_freq;
1568 if (!hba->use_pm_opp && !scale_up)
1569 *freq = clki->min_freq;
1571 /* Update the frequency */
1572 if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
1573 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1575 goto out; /* no state change required */
1577 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1579 start = ktime_get();
1580 ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
1582 hba->clk_scaling.target_freq = *freq;
1584 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1585 (scale_up ? "up" : "down"),
1586 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1589 if (sched_clk_scaling_suspend_work && !scale_up)
1590 queue_work(hba->clk_scaling.workq,
1591 &hba->clk_scaling.suspend_work);
1596 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1597 struct devfreq_dev_status *stat)
1599 struct ufs_hba *hba = dev_get_drvdata(dev);
1600 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1601 unsigned long flags;
1604 if (!ufshcd_is_clkscaling_supported(hba))
1607 memset(stat, 0, sizeof(*stat));
1609 spin_lock_irqsave(hba->host->host_lock, flags);
1610 curr_t = ktime_get();
1611 if (!scaling->window_start_t)
1615 * If current frequency is 0, then the ondemand governor considers
1616 * there's no initial frequency set. And it always requests to set
1617 * to max. frequency.
1619 if (hba->use_pm_opp) {
1620 stat->current_frequency = hba->clk_scaling.target_freq;
1622 struct list_head *clk_list = &hba->clk_list_head;
1623 struct ufs_clk_info *clki;
1625 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1626 stat->current_frequency = clki->curr_freq;
1629 if (scaling->is_busy_started)
1630 scaling->tot_busy_t += ktime_us_delta(curr_t,
1631 scaling->busy_start_t);
1632 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1633 stat->busy_time = scaling->tot_busy_t;
1635 scaling->window_start_t = curr_t;
1636 scaling->tot_busy_t = 0;
1638 if (scaling->active_reqs) {
1639 scaling->busy_start_t = curr_t;
1640 scaling->is_busy_started = true;
1642 scaling->busy_start_t = 0;
1643 scaling->is_busy_started = false;
1645 spin_unlock_irqrestore(hba->host->host_lock, flags);
1649 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1651 struct list_head *clk_list = &hba->clk_list_head;
1652 struct ufs_clk_info *clki;
1653 struct devfreq *devfreq;
1656 /* Skip devfreq if we don't have any clocks in the list */
1657 if (list_empty(clk_list))
1660 if (!hba->use_pm_opp) {
1661 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1662 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1663 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1666 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1667 &hba->vps->ondemand_data);
1668 devfreq = devfreq_add_device(hba->dev,
1669 &hba->vps->devfreq_profile,
1670 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1671 &hba->vps->ondemand_data);
1672 if (IS_ERR(devfreq)) {
1673 ret = PTR_ERR(devfreq);
1674 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1676 if (!hba->use_pm_opp) {
1677 dev_pm_opp_remove(hba->dev, clki->min_freq);
1678 dev_pm_opp_remove(hba->dev, clki->max_freq);
1683 hba->devfreq = devfreq;
1688 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1690 struct list_head *clk_list = &hba->clk_list_head;
1695 devfreq_remove_device(hba->devfreq);
1696 hba->devfreq = NULL;
1698 if (!hba->use_pm_opp) {
1699 struct ufs_clk_info *clki;
1701 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1702 dev_pm_opp_remove(hba->dev, clki->min_freq);
1703 dev_pm_opp_remove(hba->dev, clki->max_freq);
1707 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1709 unsigned long flags;
1710 bool suspend = false;
1712 cancel_work_sync(&hba->clk_scaling.suspend_work);
1713 cancel_work_sync(&hba->clk_scaling.resume_work);
1715 spin_lock_irqsave(hba->host->host_lock, flags);
1716 if (!hba->clk_scaling.is_suspended) {
1718 hba->clk_scaling.is_suspended = true;
1719 hba->clk_scaling.window_start_t = 0;
1721 spin_unlock_irqrestore(hba->host->host_lock, flags);
1724 devfreq_suspend_device(hba->devfreq);
1727 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1729 unsigned long flags;
1730 bool resume = false;
1732 spin_lock_irqsave(hba->host->host_lock, flags);
1733 if (hba->clk_scaling.is_suspended) {
1735 hba->clk_scaling.is_suspended = false;
1737 spin_unlock_irqrestore(hba->host->host_lock, flags);
1740 devfreq_resume_device(hba->devfreq);
1743 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1744 struct device_attribute *attr, char *buf)
1746 struct ufs_hba *hba = dev_get_drvdata(dev);
1748 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1751 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1752 struct device_attribute *attr, const char *buf, size_t count)
1754 struct ufs_hba *hba = dev_get_drvdata(dev);
1758 if (kstrtou32(buf, 0, &value))
1759 return -EINVAL;
1761 down(&hba->host_sem);
1762 if (!ufshcd_is_user_access_allowed(hba)) {
1768 if (value == hba->clk_scaling.is_enabled)
1771 ufshcd_rpm_get_sync(hba);
1774 hba->clk_scaling.is_enabled = value;
1777 ufshcd_resume_clkscaling(hba);
1779 ufshcd_suspend_clkscaling(hba);
1780 err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
1782 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1786 ufshcd_release(hba);
1787 ufshcd_rpm_put_sync(hba);
1790 return err ? err : count;
1793 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1795 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1796 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1797 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1798 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1799 hba->clk_scaling.enable_attr.attr.mode = 0644;
1800 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1801 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1804 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1806 if (hba->clk_scaling.enable_attr.attr.name)
1807 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1810 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1812 char wq_name[sizeof("ufs_clkscaling_00")];
1814 if (!ufshcd_is_clkscaling_supported(hba))
1817 if (!hba->clk_scaling.min_gear)
1818 hba->clk_scaling.min_gear = UFS_HS_G1;
1820 INIT_WORK(&hba->clk_scaling.suspend_work,
1821 ufshcd_clk_scaling_suspend_work);
1822 INIT_WORK(&hba->clk_scaling.resume_work,
1823 ufshcd_clk_scaling_resume_work);
1825 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1826 hba->host->host_no);
1827 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1829 hba->clk_scaling.is_initialized = true;
1832 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1834 if (!hba->clk_scaling.is_initialized)
1837 ufshcd_remove_clk_scaling_sysfs(hba);
1838 destroy_workqueue(hba->clk_scaling.workq);
1839 ufshcd_devfreq_remove(hba);
1840 hba->clk_scaling.is_initialized = false;
1843 static void ufshcd_ungate_work(struct work_struct *work)
1846 unsigned long flags;
1847 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1848 clk_gating.ungate_work);
1850 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1852 spin_lock_irqsave(hba->host->host_lock, flags);
1853 if (hba->clk_gating.state == CLKS_ON) {
1854 spin_unlock_irqrestore(hba->host->host_lock, flags);
1858 spin_unlock_irqrestore(hba->host->host_lock, flags);
1859 ufshcd_hba_vreg_set_hpm(hba);
1860 ufshcd_setup_clocks(hba, true);
1862 ufshcd_enable_irq(hba);
1864 /* Exit from hibern8 */
1865 if (ufshcd_can_hibern8_during_gating(hba)) {
1866 /* Prevent gating in this path */
1867 hba->clk_gating.is_suspended = true;
1868 if (ufshcd_is_link_hibern8(hba)) {
1869 ret = ufshcd_uic_hibern8_exit(hba);
1871 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1874 ufshcd_set_link_active(hba);
1876 hba->clk_gating.is_suspended = false;
1881 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1882 * Also, exit from hibern8 mode and set the link as active.
1883 * @hba: per adapter instance
1885 void ufshcd_hold(struct ufs_hba *hba)
1888 unsigned long flags;
1890 if (!ufshcd_is_clkgating_allowed(hba) ||
1891 !hba->clk_gating.is_initialized)
1893 spin_lock_irqsave(hba->host->host_lock, flags);
1894 hba->clk_gating.active_reqs++;
1896 start:
1897 switch (hba->clk_gating.state) {
1898 case CLKS_ON:
1899 /*
1900 * Wait for the ungate work to complete if in progress.
1901 * Though the clocks may be in the ON state, the link could
1902 * still be in hibern8 state if hibern8 is allowed
1903 * during clock gating.
1904 * Make sure we exit hibern8 state also in addition to
1905 * clocks being ON.
1906 */
1907 if (ufshcd_can_hibern8_during_gating(hba) &&
1908 ufshcd_is_link_hibern8(hba)) {
1909 spin_unlock_irqrestore(hba->host->host_lock, flags);
1910 flush_result = flush_work(&hba->clk_gating.ungate_work);
1911 if (hba->clk_gating.is_suspended && !flush_result)
1912 return;
1913 spin_lock_irqsave(hba->host->host_lock, flags);
1914 goto start;
1915 }
1916 break;
1917 case REQ_CLKS_OFF:
1918 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1919 hba->clk_gating.state = CLKS_ON;
1920 trace_ufshcd_clk_gating(dev_name(hba->dev),
1921 hba->clk_gating.state);
1922 break;
1923 }
1924 /*
1925 * If we are here, it means gating work is either done or
1926 * currently running. Hence, fall through to cancel gating
1927 * work and to enable clocks.
1928 */
1929 fallthrough;
1930 case CLKS_OFF:
1931 hba->clk_gating.state = REQ_CLKS_ON;
1932 trace_ufshcd_clk_gating(dev_name(hba->dev),
1933 hba->clk_gating.state);
1934 queue_work(hba->clk_gating.clk_gating_workq,
1935 &hba->clk_gating.ungate_work);
1936 /*
1937 * fall through to check if we should wait for this
1938 * work to be done or not.
1939 */
1940 fallthrough;
1941 case REQ_CLKS_ON:
1942 spin_unlock_irqrestore(hba->host->host_lock, flags);
1943 flush_work(&hba->clk_gating.ungate_work);
1944 /* Make sure state is CLKS_ON before returning */
1945 spin_lock_irqsave(hba->host->host_lock, flags);
1946 goto start;
1947 default:
1948 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1949 __func__, hba->clk_gating.state);
1950 break;
1951 }
1952 spin_unlock_irqrestore(hba->host->host_lock, flags);
1953 }
1954 EXPORT_SYMBOL_GPL(ufshcd_hold);
1956 static void ufshcd_gate_work(struct work_struct *work)
1958 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1959 clk_gating.gate_work.work);
1960 unsigned long flags;
1961 int ret;
1963 spin_lock_irqsave(hba->host->host_lock, flags);
1964 /*
1965 * In case you are here to cancel this work the gating state
1966 * would be marked as REQ_CLKS_ON. In this case save time by
1967 * skipping the gating work and exit after changing the clock
1968 * state to CLKS_ON.
1969 */
1970 if (hba->clk_gating.is_suspended ||
1971 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1972 hba->clk_gating.state = CLKS_ON;
1973 trace_ufshcd_clk_gating(dev_name(hba->dev),
1974 hba->clk_gating.state);
1978 if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1979 goto rel_lock;
1981 spin_unlock_irqrestore(hba->host->host_lock, flags);
1983 /* put the link into hibern8 mode before turning off clocks */
1984 if (ufshcd_can_hibern8_during_gating(hba)) {
1985 ret = ufshcd_uic_hibern8_enter(hba);
1986 if (ret) {
1987 hba->clk_gating.state = CLKS_ON;
1988 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1989 __func__, ret);
1990 trace_ufshcd_clk_gating(dev_name(hba->dev),
1991 hba->clk_gating.state);
1992 goto out;
1993 }
1994 ufshcd_set_link_hibern8(hba);
1995 }
1997 ufshcd_disable_irq(hba);
1999 ufshcd_setup_clocks(hba, false);
2001 /* Put the host controller in low power mode if possible */
2002 ufshcd_hba_vreg_set_lpm(hba);
2003 /*
2004 * In case you are here to cancel this work the gating state
2005 * would be marked as REQ_CLKS_ON. In this case keep the state
2006 * as REQ_CLKS_ON which would anyway imply that clocks are off
2007 * and a request to turn them on is pending. By doing this way,
2008 * we keep the state machine intact and this would ultimately
2009 * prevent from doing cancel work multiple times when there are
2010 * new requests arriving before the current cancel work is done.
2011 */
2012 spin_lock_irqsave(hba->host->host_lock, flags);
2013 if (hba->clk_gating.state == REQ_CLKS_OFF) {
2014 hba->clk_gating.state = CLKS_OFF;
2015 trace_ufshcd_clk_gating(dev_name(hba->dev),
2016 hba->clk_gating.state);
2017 }
2018 rel_lock:
2019 spin_unlock_irqrestore(hba->host->host_lock, flags);
2020 out:
2021 return;
2022 }
2024 /* host lock must be held before calling this variant */
2025 static void __ufshcd_release(struct ufs_hba *hba)
2027 if (!ufshcd_is_clkgating_allowed(hba))
2028 return;
2030 hba->clk_gating.active_reqs--;
2032 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
2033 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
2034 hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
2035 hba->active_uic_cmd || hba->uic_async_done ||
2036 hba->clk_gating.state == CLKS_OFF)
2037 return;
2039 hba->clk_gating.state = REQ_CLKS_OFF;
2040 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
2041 queue_delayed_work(hba->clk_gating.clk_gating_workq,
2042 &hba->clk_gating.gate_work,
2043 msecs_to_jiffies(hba->clk_gating.delay_ms));
2046 void ufshcd_release(struct ufs_hba *hba)
2048 unsigned long flags;
2050 spin_lock_irqsave(hba->host->host_lock, flags);
2051 __ufshcd_release(hba);
2052 spin_unlock_irqrestore(hba->host->host_lock, flags);
2054 EXPORT_SYMBOL_GPL(ufshcd_release);
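/*
 * The gating contract, in sketch form: any path that needs the host
 * powered brackets its work with a hold/release pair:
 *
 *   ufshcd_hold(hba);
 *   ... issue the command and wait for completion ...
 *   ufshcd_release(hba);
 *
 * The final release arms gate_work, which gates the clocks after
 * clk_gating.delay_ms of idleness.
 */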
2056 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
2057 struct device_attribute *attr, char *buf)
2059 struct ufs_hba *hba = dev_get_drvdata(dev);
2061 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
2064 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
2066 struct ufs_hba *hba = dev_get_drvdata(dev);
2067 unsigned long flags;
2069 spin_lock_irqsave(hba->host->host_lock, flags);
2070 hba->clk_gating.delay_ms = value;
2071 spin_unlock_irqrestore(hba->host->host_lock, flags);
2073 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
2075 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
2076 struct device_attribute *attr, const char *buf, size_t count)
2078 unsigned long value;
2080 if (kstrtoul(buf, 0, &value))
2081 return -EINVAL;
2083 ufshcd_clkgate_delay_set(dev, value);
2084 return count;
2085 }
2087 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
2088 struct device_attribute *attr, char *buf)
2090 struct ufs_hba *hba = dev_get_drvdata(dev);
2092 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
2095 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
2096 struct device_attribute *attr, const char *buf, size_t count)
2098 struct ufs_hba *hba = dev_get_drvdata(dev);
2099 unsigned long flags;
2102 if (kstrtou32(buf, 0, &value))
2103 return -EINVAL;
2105 value = !!value;
2107 spin_lock_irqsave(hba->host->host_lock, flags);
2108 if (value == hba->clk_gating.is_enabled)
2109 goto out;
2111 if (value)
2112 __ufshcd_release(hba);
2113 else
2114 hba->clk_gating.active_reqs++;
2116 hba->clk_gating.is_enabled = value;
2117 out:
2118 spin_unlock_irqrestore(hba->host->host_lock, flags);
2119 return count;
2120 }
2122 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
2124 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
2125 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
2126 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
2127 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
2128 hba->clk_gating.delay_attr.attr.mode = 0644;
2129 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
2130 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
2132 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
2133 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
2134 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
2135 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
2136 hba->clk_gating.enable_attr.attr.mode = 0644;
2137 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
2138 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
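/*
 * The two attributes registered above appear under the controller's device
 * node in sysfs; an administrator could tune them like this (the path is
 * illustrative and platform dependent):
 *
 *	echo 200 > /sys/devices/.../clkgate_delay_ms
 *	echo 0   > /sys/devices/.../clkgate_enable
 */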
2141 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
2143 if (hba->clk_gating.delay_attr.attr.name)
2144 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2145 if (hba->clk_gating.enable_attr.attr.name)
2146 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
2149 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2151 char wq_name[sizeof("ufs_clk_gating_00")];
2153 if (!ufshcd_is_clkgating_allowed(hba))
2156 hba->clk_gating.state = CLKS_ON;
2158 hba->clk_gating.delay_ms = 150;
2159 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2160 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2162 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2163 hba->host->host_no);
2164 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
2165 WQ_MEM_RECLAIM | WQ_HIGHPRI);
2167 ufshcd_init_clk_gating_sysfs(hba);
2169 hba->clk_gating.is_enabled = true;
2170 hba->clk_gating.is_initialized = true;
2173 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2175 if (!hba->clk_gating.is_initialized)
2178 ufshcd_remove_clk_gating_sysfs(hba);
2180 /* Ungate the clock if necessary. */
2182 hba->clk_gating.is_initialized = false;
2183 ufshcd_release(hba);
2185 destroy_workqueue(hba->clk_gating.clk_gating_workq);
2188 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2190 bool queue_resume_work = false;
2191 ktime_t curr_t = ktime_get();
2192 unsigned long flags;
2194 if (!ufshcd_is_clkscaling_supported(hba))
2197 spin_lock_irqsave(hba->host->host_lock, flags);
2198 if (!hba->clk_scaling.active_reqs++)
2199 queue_resume_work = true;
2201 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2202 spin_unlock_irqrestore(hba->host->host_lock, flags);
2206 if (queue_resume_work)
2207 queue_work(hba->clk_scaling.workq,
2208 &hba->clk_scaling.resume_work);
2210 if (!hba->clk_scaling.window_start_t) {
2211 hba->clk_scaling.window_start_t = curr_t;
2212 hba->clk_scaling.tot_busy_t = 0;
2213 hba->clk_scaling.is_busy_started = false;
2216 if (!hba->clk_scaling.is_busy_started) {
2217 hba->clk_scaling.busy_start_t = curr_t;
2218 hba->clk_scaling.is_busy_started = true;
2220 spin_unlock_irqrestore(hba->host->host_lock, flags);
2223 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2225 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2226 unsigned long flags;
2228 if (!ufshcd_is_clkscaling_supported(hba))
2231 spin_lock_irqsave(hba->host->host_lock, flags);
2232 hba->clk_scaling.active_reqs--;
2233 if (!scaling->active_reqs && scaling->is_busy_started) {
2234 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2235 scaling->busy_start_t));
2236 scaling->busy_start_t = 0;
2237 scaling->is_busy_started = false;
2239 spin_unlock_irqrestore(hba->host->host_lock, flags);
2242 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2244 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2246 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2252 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2253 struct ufshcd_lrb *lrbp)
2255 const struct ufs_hba_monitor *m = &hba->monitor;
2257 return (m->enabled && lrbp && lrbp->cmd &&
2258 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2259 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2262 static void ufshcd_start_monitor(struct ufs_hba *hba,
2263 const struct ufshcd_lrb *lrbp)
2265 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2266 unsigned long flags;
2268 spin_lock_irqsave(hba->host->host_lock, flags);
2269 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2270 hba->monitor.busy_start_ts[dir] = ktime_get();
2271 spin_unlock_irqrestore(hba->host->host_lock, flags);
2274 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2276 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2277 unsigned long flags;
2279 spin_lock_irqsave(hba->host->host_lock, flags);
2280 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2281 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2282 struct ufs_hba_monitor *m = &hba->monitor;
2283 ktime_t now, inc, lat;
2285 now = lrbp->compl_time_stamp;
2286 inc = ktime_sub(now, m->busy_start_ts[dir]);
2287 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2288 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2290 /* Update latencies */
2292 lat = ktime_sub(now, lrbp->issue_time_stamp);
2293 m->lat_sum[dir] += lat;
2294 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2295 m->lat_max[dir] = lat;
2296 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2297 m->lat_min[dir] = lat;
2299 m->nr_queued[dir]--;
/* Push the monitor's busy-start time forward */
2301 m->busy_start_ts[dir] = now;
2303 spin_unlock_irqrestore(hba->host->host_lock, flags);
2307 * ufshcd_send_command - Send SCSI or device management commands
2308 * @hba: per adapter instance
2309 * @task_tag: Task tag of the command
2310 * @hwq: pointer to hardware queue instance
2313 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2314 struct ufs_hw_queue *hwq)
2316 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2317 unsigned long flags;
2319 lrbp->issue_time_stamp = ktime_get();
2320 lrbp->issue_time_stamp_local_clock = local_clock();
2321 lrbp->compl_time_stamp = ktime_set(0, 0);
2322 lrbp->compl_time_stamp_local_clock = 0;
2323 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2325 ufshcd_clk_scaling_start_busy(hba);
2326 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2327 ufshcd_start_monitor(hba, lrbp);
2329 if (is_mcq_enabled(hba)) {
2330 int utrd_size = sizeof(struct utp_transfer_req_desc);
2331 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2332 struct utp_transfer_req_desc *dest;
2334 spin_lock(&hwq->sq_lock);
2335 dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2336 memcpy(dest, src, utrd_size);
2337 ufshcd_inc_sq_tail(hwq);
2338 spin_unlock(&hwq->sq_lock);
2340 spin_lock_irqsave(&hba->outstanding_lock, flags);
2341 if (hba->vops && hba->vops->setup_xfer_req)
2342 hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2344 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2345 ufshcd_writel(hba, 1 << lrbp->task_tag,
2346 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2347 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2352 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2353 * @lrbp: pointer to local reference block
2355 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2357 u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2361 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
2362 if (sense_buffer && resp_len) {
2365 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2366 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2368 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
 * ufshcd_copy_query_response() - Copy the Query Response and the data descriptor
2376 * @hba: per adapter instance
2377 * @lrbp: pointer to local reference block
2379 * Return: 0 upon success; < 0 upon failure.
2382 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2384 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2386 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2388 /* Get the descriptor */
2389 if (hba->dev_cmd.query.descriptor &&
2390 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2391 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2392 GENERAL_UPIU_REQUEST_SIZE;
2396 /* data segment length */
2397 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
2398 .data_segment_length);
2399 buf_len = be16_to_cpu(
2400 hba->dev_cmd.query.request.upiu_req.length);
2401 if (likely(buf_len >= resp_len)) {
2402 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2405 "%s: rsp size %d is bigger than buffer size %d",
2406 __func__, resp_len, buf_len);
2415 * ufshcd_hba_capabilities - Read controller capabilities
2416 * @hba: per adapter instance
2418 * Return: 0 on success, negative on error.
2420 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2424 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2425 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
2426 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
2428 /* nutrs and nutmrs are 0 based values */
2429 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2431 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2432 hba->reserved_slot = hba->nutrs - 1;
2434 /* Read crypto capabilities */
2435 err = ufshcd_hba_init_crypto_capabilities(hba);
2437 dev_err(hba->dev, "crypto setup failed\n");
2441 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2445 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2446 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2447 hba->mcq_capabilities);
2453 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2454 * to accept UIC commands
2455 * @hba: per adapter instance
2457 * Return: true on success, else false.
2459 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2462 int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2463 500, UIC_CMD_TIMEOUT * 1000, false, hba,
2464 REG_CONTROLLER_STATUS);
2469 * ufshcd_get_upmcrs - Get the power mode change request status
2470 * @hba: Pointer to adapter instance
 * This function gets the UPMCRS field of the HCS register.
2474 * Return: value of UPMCRS field.
2476 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
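	/* UPMCRS occupies bits 10:8 of the HCS register, hence shift and mask */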
2478 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the UniPro layer
2483 * @hba: per adapter instance
2484 * @uic_cmd: UIC command
2487 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2489 lockdep_assert_held(&hba->uic_cmd_mutex);
2491 WARN_ON(hba->active_uic_cmd);
2493 hba->active_uic_cmd = uic_cmd;
2496 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2497 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2498 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2500 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2503 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2509 * @hba: per adapter instance
2510 * @uic_cmd: UIC command
2512 * Return: 0 only if success.
2515 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2518 unsigned long flags;
2520 lockdep_assert_held(&hba->uic_cmd_mutex);
2522 if (wait_for_completion_timeout(&uic_cmd->done,
2523 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2524 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2528 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2529 uic_cmd->command, uic_cmd->argument3);
2531 if (!uic_cmd->cmd_active) {
2532 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2534 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2538 spin_lock_irqsave(hba->host->host_lock, flags);
2539 hba->active_uic_cmd = NULL;
2540 spin_unlock_irqrestore(hba->host->host_lock, flags);
2546 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2547 * @hba: per adapter instance
2548 * @uic_cmd: UIC command
2549 * @completion: initialize the completion only if this is set to true
2551 * Return: 0 only if success.
2554 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2557 lockdep_assert_held(&hba->uic_cmd_mutex);
2559 if (!ufshcd_ready_for_uic_cmd(hba)) {
2561 "Controller not ready to accept UIC commands\n");
2566 init_completion(&uic_cmd->done);
2568 uic_cmd->cmd_active = 1;
2569 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2575 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2576 * @hba: per adapter instance
2577 * @uic_cmd: UIC command
2579 * Return: 0 only if success.
2581 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2585 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2589 mutex_lock(&hba->uic_cmd_mutex);
2590 ufshcd_add_delay_before_dme_cmd(hba);
2592 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2594 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2596 mutex_unlock(&hba->uic_cmd_mutex);
2598 ufshcd_release(hba);
 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2604 * @hba: per-adapter instance
2605 * @lrbp: pointer to local reference block
 * @sg_entries: The number of SG entries actually used
2607 * @sg_list: Pointer to SG list
2609 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2610 struct scatterlist *sg_list)
2612 struct ufshcd_sg_entry *prd;
2613 struct scatterlist *sg;
2618 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2619 lrbp->utr_descriptor_ptr->prd_table_length =
2620 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2622 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2624 prd = lrbp->ucd_prdt_ptr;
2626 for_each_sg(sg_list, sg, sg_entries, i) {
2627 const unsigned int len = sg_dma_len(sg);
2630 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2631 * based value that indicates the length, in bytes, of
 * the data block. A maximum length of 256KB may
2633 * exist for any entry. Bits 1:0 of this field shall be
2634 * 11b to indicate Dword granularity. A value of '3'
2635 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2637 WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2638 prd->size = cpu_to_le32(len - 1);
2639 prd->addr = cpu_to_le64(sg->dma_address);
2641 prd = (void *)prd + ufshcd_sg_entry_size(hba);
2644 lrbp->utr_descriptor_ptr->prd_table_length = 0;
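/*
 * Worked example for the DBC encoding above: a 4 KiB segment is recorded
 * as prd->size = 0xFFF (4096 - 1), whose two least significant bits are
 * 11b, which satisfies the Dword-granularity rule quoted from the spec.
 */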
 * ufshcd_map_sg - Map a scatter-gather list to the PRDT
2650 * @hba: per adapter instance
2651 * @lrbp: pointer to local reference block
2653 * Return: 0 in case of success, non-zero value in case of failure.
2655 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2657 struct scsi_cmnd *cmd = lrbp->cmd;
2658 int sg_segments = scsi_dma_map(cmd);
2660 if (sg_segments < 0)
2663 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2669 * ufshcd_enable_intr - enable interrupts
2670 * @hba: per adapter instance
2671 * @intrs: interrupt bits
2673 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2675 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2677 if (hba->ufs_version == ufshci_version(1, 0)) {
2679 rw = set & INTERRUPT_MASK_RW_VER_10;
2680 set = rw | ((set ^ intrs) & intrs);
2685 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2689 * ufshcd_disable_intr - disable interrupts
2690 * @hba: per adapter instance
2691 * @intrs: interrupt bits
2693 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2695 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2697 if (hba->ufs_version == ufshci_version(1, 0)) {
2699 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2700 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2701 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2707 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
 * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
 * header according to the request
2713 * @lrbp: pointer to local reference block
2714 * @upiu_flags: flags required in the header
2715 * @cmd_dir: requests data direction
 * @ehs_length: Total EHS Length (in units of 32 bytes) of all Extra Header Segments
2718 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2719 enum dma_data_direction cmd_dir, int ehs_length)
2721 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2722 struct request_desc_header *h = &req_desc->header;
2723 enum utp_data_direction data_direction;
2725 *h = (typeof(*h)){ };
2727 if (cmd_dir == DMA_FROM_DEVICE) {
2728 data_direction = UTP_DEVICE_TO_HOST;
2729 *upiu_flags = UPIU_CMD_FLAGS_READ;
2730 } else if (cmd_dir == DMA_TO_DEVICE) {
2731 data_direction = UTP_HOST_TO_DEVICE;
2732 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2734 data_direction = UTP_NO_DATA_TRANSFER;
2735 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2738 h->command_type = lrbp->command_type;
2739 h->data_direction = data_direction;
2740 h->ehs_length = ehs_length;
2745 /* Prepare crypto related dwords */
2746 ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
 * Assign an invalid value to the command status. The controller
 * updates the OCS field on command completion with the actual status.
2753 h->ocs = OCS_INVALID_COMMAND_STATUS;
2755 req_desc->prd_table_length = 0;
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc for a SCSI command
2761 * @lrbp: local reference block pointer
2762 * @upiu_flags: flags
2765 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2767 struct scsi_cmnd *cmd = lrbp->cmd;
2768 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2769 unsigned short cdb_len;
2771 ucd_req_ptr->header = (struct utp_upiu_header){
2772 .transaction_code = UPIU_TRANSACTION_COMMAND,
2773 .flags = upiu_flags,
2775 .task_tag = lrbp->task_tag,
2776 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
2779 WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
2781 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2783 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2784 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2785 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2787 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2791 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
2793 * @lrbp: local reference block pointer
2794 * @upiu_flags: flags
2796 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2797 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2799 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2800 struct ufs_query *query = &hba->dev_cmd.query;
2801 u16 len = be16_to_cpu(query->request.upiu_req.length);
2803 /* Query request header */
2804 ucd_req_ptr->header = (struct utp_upiu_header){
2805 .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
2806 .flags = upiu_flags,
2808 .task_tag = lrbp->task_tag,
2809 .query_function = query->request.query_func,
/* Data segment length is only needed for WRITE_DESC */
2811 .data_segment_length =
2812 query->request.upiu_req.opcode ==
2813 UPIU_QUERY_OPCODE_WRITE_DESC ?
2818 /* Copy the Query Request buffer as is */
2819 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2822 /* Copy the Descriptor */
2823 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2824 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2826 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2829 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2831 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2833 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2835 ucd_req_ptr->header = (struct utp_upiu_header){
2836 .transaction_code = UPIU_TRANSACTION_NOP_OUT,
2837 .task_tag = lrbp->task_tag,
2840 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 * ufshcd_compose_devman_upiu - Compose a UFS Protocol Information Unit (UPIU)
 * for device management purposes
2846 * @hba: per adapter instance
2847 * @lrbp: pointer to local reference block
2849 * Return: 0 upon success; < 0 upon failure.
2851 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2852 struct ufshcd_lrb *lrbp)
2857 if (hba->ufs_version <= ufshci_version(1, 1))
2858 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2860 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2862 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
2863 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2864 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2865 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2866 ufshcd_prepare_utp_nop_upiu(lrbp);
 * ufshcd_comp_scsi_upiu - Compose a UFS Protocol Information Unit (UPIU) for a SCSI command
2876 * @hba: per adapter instance
2877 * @lrbp: pointer to local reference block
2879 static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2881 struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
2882 unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
2885 if (hba->ufs_version <= ufshci_version(1, 1))
2886 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2888 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2890 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2891 lrbp->cmd->sc_data_direction, 0);
2892 if (ioprio_class == IOPRIO_CLASS_RT)
2893 upiu_flags |= UPIU_CMD_FLAGS_CP;
2894 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2898 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2899 * @upiu_wlun_id: UPIU W-LUN id
2901 * Return: SCSI W-LUN id.
2903 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2905 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
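/*
 * Worked example, assuming UFS_UPIU_WLUN_ID is BIT(7) and SCSI_W_LUN_BASE
 * is 0xc100: the UFS device W-LUN 0xD0 maps to the SCSI W-LUN 0xc150.
 */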
2908 static inline bool is_device_wlun(struct scsi_device *sdev)
2911 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2915 * Associate the UFS controller queue with the default and poll HCTX types.
2916 * Initialize the mq_map[] arrays.
2918 static void ufshcd_map_queues(struct Scsi_Host *shost)
2920 struct ufs_hba *hba = shost_priv(shost);
2921 int i, queue_offset = 0;
2923 if (!is_mcq_supported(hba)) {
2924 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2925 hba->nr_queues[HCTX_TYPE_READ] = 0;
2926 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2927 hba->nr_hw_queues = 1;
2930 for (i = 0; i < shost->nr_maps; i++) {
2931 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2933 map->nr_queues = hba->nr_queues[i];
2934 if (!map->nr_queues)
2936 map->queue_offset = queue_offset;
2937 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2938 map->queue_offset = 0;
2940 blk_mq_map_queues(map);
2941 queue_offset += map->nr_queues;
2945 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2947 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2948 i * ufshcd_get_ucd_size(hba);
2949 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2950 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2951 i * ufshcd_get_ucd_size(hba);
2952 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2954 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2956 lrb->utr_descriptor_ptr = utrdlp + i;
2957 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2958 i * sizeof(struct utp_transfer_req_desc);
2959 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2960 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2961 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2962 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2963 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2964 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2968 * ufshcd_queuecommand - main entry point for SCSI requests
2969 * @host: SCSI host pointer
2970 * @cmd: command from SCSI Midlayer
2972 * Return: 0 for success, non-zero in case of failure.
2974 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2976 struct ufs_hba *hba = shost_priv(host);
2977 int tag = scsi_cmd_to_rq(cmd)->tag;
2978 struct ufshcd_lrb *lrbp;
2980 struct ufs_hw_queue *hwq = NULL;
2982 switch (hba->ufshcd_state) {
2983 case UFSHCD_STATE_OPERATIONAL:
2985 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2987 * SCSI error handler can call ->queuecommand() while UFS error
2988 * handler is in progress. Error interrupts could change the
2989 * state from UFSHCD_STATE_RESET to
2990 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2991 * being issued in that case.
2993 if (ufshcd_eh_in_progress(hba)) {
2994 err = SCSI_MLQUEUE_HOST_BUSY;
2998 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
3000 * pm_runtime_get_sync() is used at error handling preparation
3001 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
3002 * PM ops, it can never be finished if we let SCSI layer keep
3003 * retrying it, which gets err handler stuck forever. Neither
3004 * can we let the scsi cmd pass through, because UFS is in bad
3005 * state, the scsi cmd may eventually time out, which will get
3006 * err handler blocked for too long. So, just fail the scsi cmd
3007 * sent from PM ops, err handler can recover PM error anyways.
3009 if (hba->pm_op_in_progress) {
3010 hba->force_reset = true;
3011 set_host_byte(cmd, DID_BAD_TARGET);
3016 case UFSHCD_STATE_RESET:
3017 err = SCSI_MLQUEUE_HOST_BUSY;
3019 case UFSHCD_STATE_ERROR:
3020 set_host_byte(cmd, DID_ERROR);
3025 hba->req_abort_count = 0;
3029 lrbp = &hba->lrb[tag];
3031 lrbp->task_tag = tag;
3032 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
3033 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
3035 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
3037 lrbp->req_abort_skip = false;
3039 ufshcd_comp_scsi_upiu(hba, lrbp);
3041 err = ufshcd_map_sg(hba, lrbp);
3043 ufshcd_release(hba);
3047 if (is_mcq_enabled(hba))
3048 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
3050 ufshcd_send_command(hba, tag, hwq);
3053 if (ufs_trigger_eh(hba)) {
3054 unsigned long flags;
3056 spin_lock_irqsave(hba->host->host_lock, flags);
3057 ufshcd_schedule_eh_work(hba);
3058 spin_unlock_irqrestore(hba->host->host_lock, flags);
3064 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3065 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3068 lrbp->task_tag = tag;
3069 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
3070 lrbp->intr_cmd = true; /* No interrupt aggregation */
3071 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
3072 hba->dev_cmd.type = cmd_type;
3074 return ufshcd_compose_devman_upiu(hba, lrbp);
3078 * Check with the block layer if the command is inflight
3079 * @cmd: command to check.
3081 * Return: true if command is inflight; false if not.
3083 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
3090 rq = scsi_cmd_to_rq(cmd);
3091 if (!blk_mq_request_started(rq))
3098 * Clear the pending command in the controller and wait until
3099 * the controller confirms that the command has been cleared.
3100 * @hba: per adapter instance
3101 * @task_tag: The tag number of the command to be cleared.
3103 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
3106 unsigned long flags;
3109 if (is_mcq_enabled(hba)) {
 * MCQ mode. Clean up the MCQ resources, similarly to what
 * ufshcd_utrl_clear() does for SDB mode.
3114 err = ufshcd_mcq_sq_cleanup(hba, task_tag);
3116 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
3117 __func__, task_tag, err);
3123 mask = 1U << task_tag;
3125 /* clear outstanding transaction before retry */
3126 spin_lock_irqsave(hba->host->host_lock, flags);
3127 ufshcd_utrl_clear(hba, mask);
3128 spin_unlock_irqrestore(hba->host->host_lock, flags);
 * Wait for the hardware to clear the corresponding doorbell bit.
 * The maximum wait is 1 second.
3134 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
3135 mask, ~mask, 1000, 1000);
3139 * ufshcd_dev_cmd_completion() - handles device management command responses
3140 * @hba: per adapter instance
3141 * @lrbp: pointer to local reference block
3143 * Return: 0 upon success; < 0 upon failure.
3146 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3148 enum upiu_response_transaction resp;
3151 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3152 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3155 case UPIU_TRANSACTION_NOP_IN:
3156 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3158 dev_err(hba->dev, "%s: unexpected response %x\n",
3162 case UPIU_TRANSACTION_QUERY_RSP: {
3163 u8 response = lrbp->ucd_rsp_ptr->header.response;
3166 err = ufshcd_copy_query_response(hba, lrbp);
3169 case UPIU_TRANSACTION_REJECT_UPIU:
3170 /* TODO: handle Reject UPIU Response */
3172 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3175 case UPIU_TRANSACTION_RESPONSE:
3176 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3178 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3183 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3191 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3192 struct ufshcd_lrb *lrbp, int max_timeout)
3194 unsigned long time_left = msecs_to_jiffies(max_timeout);
3195 unsigned long flags;
3200 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3203 if (likely(time_left)) {
3205 * The completion handler called complete() and the caller of
3206 * this function still owns the @lrbp tag so the code below does
3207 * not trigger any race conditions.
3209 hba->dev_cmd.complete = NULL;
3210 err = ufshcd_get_tr_ocs(lrbp, NULL);
3212 err = ufshcd_dev_cmd_completion(hba, lrbp);
3215 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
3216 __func__, lrbp->task_tag);
3219 if (is_mcq_enabled(hba)) {
3220 /* successfully cleared the command, retry if needed */
3221 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
3223 hba->dev_cmd.complete = NULL;
3228 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
3229 /* successfully cleared the command, retry if needed */
3232 * Since clearing the command succeeded we also need to
3233 * clear the task tag bit from the outstanding_reqs
3236 spin_lock_irqsave(&hba->outstanding_lock, flags);
3237 pending = test_bit(lrbp->task_tag,
3238 &hba->outstanding_reqs);
3240 hba->dev_cmd.complete = NULL;
3241 __clear_bit(lrbp->task_tag,
3242 &hba->outstanding_reqs);
3244 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3248 * The completion handler ran while we tried to
3249 * clear the command.
3255 dev_err(hba->dev, "%s: failed to clear tag %d\n",
3256 __func__, lrbp->task_tag);
3258 spin_lock_irqsave(&hba->outstanding_lock, flags);
3259 pending = test_bit(lrbp->task_tag,
3260 &hba->outstanding_reqs);
3262 hba->dev_cmd.complete = NULL;
3263 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3267 * The completion handler ran while we tried to
3268 * clear the command.
3280 * ufshcd_exec_dev_cmd - API for sending device management requests
3282 * @cmd_type: specifies the type (NOP, Query...)
3283 * @timeout: timeout in milliseconds
3285 * Return: 0 upon success; < 0 upon failure.
 * NOTE: Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
3290 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3291 enum dev_cmd_type cmd_type, int timeout)
3293 DECLARE_COMPLETION_ONSTACK(wait);
3294 const u32 tag = hba->reserved_slot;
3295 struct ufshcd_lrb *lrbp;
3298 /* Protects use of hba->reserved_slot. */
3299 lockdep_assert_held(&hba->dev_cmd.lock);
3301 down_read(&hba->clk_scaling_lock);
3303 lrbp = &hba->lrb[tag];
3305 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3309 hba->dev_cmd.complete = &wait;
3311 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3313 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3314 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3315 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3316 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3319 up_read(&hba->clk_scaling_lock);
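/*
 * Illustrative sketch (not part of the driver): issuing a NOP OUT through
 * the helper above while holding the mutex mentioned in the NOTE:
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */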
3324 * ufshcd_init_query() - init the query response and request parameters
3325 * @hba: per-adapter instance
3326 * @request: address of the request pointer to be initialized
3327 * @response: address of the response pointer to be initialized
3328 * @opcode: operation to perform
3329 * @idn: flag idn to access
3330 * @index: LU number to access
3331 * @selector: query/flag/descriptor further identification
3333 static inline void ufshcd_init_query(struct ufs_hba *hba,
3334 struct ufs_query_req **request, struct ufs_query_res **response,
3335 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3337 *request = &hba->dev_cmd.query.request;
3338 *response = &hba->dev_cmd.query.response;
3339 memset(*request, 0, sizeof(struct ufs_query_req));
3340 memset(*response, 0, sizeof(struct ufs_query_res));
3341 (*request)->upiu_req.opcode = opcode;
3342 (*request)->upiu_req.idn = idn;
3343 (*request)->upiu_req.index = index;
3344 (*request)->upiu_req.selector = selector;
3347 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3348 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3353 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3354 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3357 "%s: failed with error %d, retries %d\n",
3358 __func__, ret, retries);
3365 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3366 __func__, opcode, idn, ret, retries);
3371 * ufshcd_query_flag() - API function for sending flag query requests
3372 * @hba: per-adapter instance
3373 * @opcode: flag query to perform
3374 * @idn: flag idn to access
3375 * @index: flag index to access
3376 * @flag_res: the flag value after the query request completes
3378 * Return: 0 for success, non-zero in case of failure.
3380 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3381 enum flag_idn idn, u8 index, bool *flag_res)
3383 struct ufs_query_req *request = NULL;
3384 struct ufs_query_res *response = NULL;
3385 int err, selector = 0;
3386 int timeout = QUERY_REQ_TIMEOUT;
3391 mutex_lock(&hba->dev_cmd.lock);
3392 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3396 case UPIU_QUERY_OPCODE_SET_FLAG:
3397 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3398 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3399 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3401 case UPIU_QUERY_OPCODE_READ_FLAG:
3402 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3404 /* No dummy reads */
3405 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3413 "%s: Expected query flag opcode but got = %d\n",
3419 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3423 "%s: Sending flag query for idn %d failed, err = %d\n",
3424 __func__, idn, err);
3429 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3430 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3433 mutex_unlock(&hba->dev_cmd.lock);
3434 ufshcd_release(hba);
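/*
 * Example (a sketch): setting a flag through the API above, e.g. fDeviceInit
 * during device initialization:
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
 */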
3439 * ufshcd_query_attr - API function for sending attribute requests
3440 * @hba: per-adapter instance
3441 * @opcode: attribute opcode
3442 * @idn: attribute idn to access
3443 * @index: index field
3444 * @selector: selector field
3445 * @attr_val: the attribute value after the query request completes
3447 * Return: 0 for success, non-zero in case of failure.
3449 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3450 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3452 struct ufs_query_req *request = NULL;
3453 struct ufs_query_res *response = NULL;
3459 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3466 mutex_lock(&hba->dev_cmd.lock);
3467 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3471 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3472 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3473 request->upiu_req.value = cpu_to_be32(*attr_val);
3475 case UPIU_QUERY_OPCODE_READ_ATTR:
3476 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3479 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3485 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3488 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3489 __func__, opcode, idn, index, err);
3493 *attr_val = be32_to_cpu(response->upiu_res.value);
3496 mutex_unlock(&hba->dev_cmd.lock);
3497 ufshcd_release(hba);
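/*
 * Example (a sketch): reading an attribute through the API above, e.g. the
 * active ICC level:
 *
 *	u32 icc_level;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
 */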
3502 * ufshcd_query_attr_retry() - API function for sending query
 * attribute requests with retries
3504 * @hba: per-adapter instance
3505 * @opcode: attribute opcode
3506 * @idn: attribute idn to access
3507 * @index: index field
3508 * @selector: selector field
3509 * @attr_val: the attribute value after the query request
3512 * Return: 0 for success, non-zero in case of failure.
3514 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3515 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3521 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3522 ret = ufshcd_query_attr(hba, opcode, idn, index,
3523 selector, attr_val);
3525 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3526 __func__, ret, retries);
3533 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3534 __func__, idn, ret, QUERY_REQ_RETRIES);
3538 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3539 enum query_opcode opcode, enum desc_idn idn, u8 index,
3540 u8 selector, u8 *desc_buf, int *buf_len)
3542 struct ufs_query_req *request = NULL;
3543 struct ufs_query_res *response = NULL;
3549 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3554 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3555 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3556 __func__, *buf_len);
3562 mutex_lock(&hba->dev_cmd.lock);
3563 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3565 hba->dev_cmd.query.descriptor = desc_buf;
3566 request->upiu_req.length = cpu_to_be16(*buf_len);
3569 case UPIU_QUERY_OPCODE_WRITE_DESC:
3570 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3572 case UPIU_QUERY_OPCODE_READ_DESC:
3573 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3577 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3583 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3586 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3587 __func__, opcode, idn, index, err);
3591 *buf_len = be16_to_cpu(response->upiu_res.length);
3594 hba->dev_cmd.query.descriptor = NULL;
3595 mutex_unlock(&hba->dev_cmd.lock);
3596 ufshcd_release(hba);
3601 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3602 * @hba: per-adapter instance
 * @opcode: descriptor opcode
 * @idn: descriptor idn to access
3605 * @index: index field
3606 * @selector: selector field
3607 * @desc_buf: the buffer that contains the descriptor
3608 * @buf_len: length parameter passed to the device
3610 * The buf_len parameter will contain, on return, the length parameter
3611 * received on the response.
3613 * Return: 0 for success, non-zero in case of failure.
3615 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3616 enum query_opcode opcode,
3617 enum desc_idn idn, u8 index,
3619 u8 *desc_buf, int *buf_len)
3624 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3625 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3626 selector, desc_buf, buf_len);
3627 if (!err || err == -EINVAL)
3635 * ufshcd_read_desc_param - read the specified descriptor parameter
3636 * @hba: Pointer to adapter instance
3637 * @desc_id: descriptor idn value
3638 * @desc_index: descriptor index
3639 * @param_offset: offset of the parameter to read
3640 * @param_read_buf: pointer to buffer where parameter would be read
3641 * @param_size: sizeof(param_read_buf)
3643 * Return: 0 in case of success, non-zero otherwise.
3645 int ufshcd_read_desc_param(struct ufs_hba *hba,
3646 enum desc_idn desc_id,
3654 int buff_len = QUERY_DESC_MAX_SIZE;
3655 bool is_kmalloc = true;
3658 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3661 /* Check whether we need temp memory */
3662 if (param_offset != 0 || param_size < buff_len) {
3663 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3667 desc_buf = param_read_buf;
3671 /* Request for full descriptor */
3672 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3673 desc_id, desc_index, 0,
3674 desc_buf, &buff_len);
3676 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3677 __func__, desc_id, desc_index, param_offset, ret);
3681 /* Update descriptor length */
3682 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3684 if (param_offset >= buff_len) {
3685 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3686 __func__, param_offset, desc_id, buff_len);
3692 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3693 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3694 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3700 /* Make sure we don't copy more data than available */
3701 if (param_offset >= buff_len)
3704 memcpy(param_read_buf, &desc_buf[param_offset],
3705 min_t(u32, param_size, buff_len - param_offset));
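/*
 * Example (a sketch): reading a complete device descriptor with the retry
 * helper above into a caller-provided buffer:
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = sizeof(desc_buf);
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 */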
3714 * struct uc_string_id - unicode string
 * @len: size of this descriptor, header included
3717 * @type: descriptor type
3718 * @uc: unicode string character
3720 struct uc_string_id {
3726 /* replace non-printable or non-ASCII characters with spaces */
3727 static inline char ufshcd_remove_non_printable(u8 ch)
3729 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3733 * ufshcd_read_string_desc - read string descriptor
3734 * @hba: pointer to adapter instance
3735 * @desc_index: descriptor index
 * @buf: pointer to the buffer where the descriptor will be read;
 * the caller should free the memory
 * @ascii: if true, convert from unicode to a null-terminated ASCII string
3742 * * string size on success.
3743 * * -ENOMEM: on allocation failure
3744 * * -EINVAL: on a wrong parameter
3746 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3747 u8 **buf, bool ascii)
3749 struct uc_string_id *uc_str;
3756 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3760 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3761 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3763 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3764 QUERY_REQ_RETRIES, ret);
3769 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3770 dev_dbg(hba->dev, "String Desc is of zero length\n");
3779 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3780 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3781 str = kzalloc(ascii_len, GFP_KERNEL);
 * The descriptor contains the string in UTF-16 format.
 * Convert it to UTF-8 so that it can be displayed.
3791 ret = utf16s_to_utf8s(uc_str->uc,
3792 uc_str->len - QUERY_DESC_HDR_SIZE,
3793 UTF16_BIG_ENDIAN, str, ascii_len - 1);
3795 /* replace non-printable or non-ASCII characters with spaces */
3796 for (i = 0; i < ret; i++)
3797 str[i] = ufshcd_remove_non_printable(str[i]);
3802 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
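/*
 * Example (a sketch): reading a product-name string descriptor as ASCII;
 * model_index is illustrative and would come from the device descriptor:
 *
 *	u8 *model;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		... use the string, then kfree(model) ...
 */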
3816 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3817 * @hba: Pointer to adapter instance
3819 * @param_offset: offset of the parameter to read
3820 * @param_read_buf: pointer to buffer where parameter would be read
3821 * @param_size: sizeof(param_read_buf)
3823 * Return: 0 in case of success, non-zero otherwise.
3825 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3827 enum unit_desc_param param_offset,
3832 * Unit descriptors are only available for general purpose LUs (LUN id
 * from 0 to 7) and the RPMB well-known LU.
3835 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3838 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3839 param_offset, param_read_buf, param_size);
3842 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3845 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3847 if (hba->dev_info.wspecversion >= 0x300) {
3848 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3849 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3852 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3855 if (gating_wait == 0) {
3856 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3857 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3861 hba->dev_info.clk_gating_wait_us = gating_wait;
3868 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3869 * @hba: per adapter instance
3871 * 1. Allocate DMA memory for Command Descriptor array
 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3873 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3874 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 * 4. Allocate memory for the local reference blocks (lrb).
3878 * Return: 0 for success, non-zero in case of failure.
3880 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3882 size_t utmrdl_size, utrdl_size, ucdl_size;
3884 /* Allocate memory for UTP command descriptors */
3885 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
3886 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3888 &hba->ucdl_dma_addr,
 * UFSHCI requires each UTP command descriptor to be 128-byte aligned.
3894 if (!hba->ucdl_base_addr ||
3895 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3897 "Command Descriptor Memory allocation failed\n");
3902 * Allocate memory for UTP Transfer descriptors
3903 * UFSHCI requires 1KB alignment of UTRD
3905 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3906 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3908 &hba->utrdl_dma_addr,
3910 if (!hba->utrdl_base_addr ||
3911 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
3913 "Transfer Descriptor Memory allocation failed\n");
3918 * Skip utmrdl allocation; it may have been
3919 * allocated during first pass and not released during
3920 * MCQ memory allocation.
3921 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3923 if (hba->utmrdl_base_addr)
3926 * Allocate memory for UTP Task Management descriptors
3927 * UFSHCI requires 1KB alignment of UTMRD
3929 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3930 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3932 &hba->utmrdl_dma_addr,
3934 if (!hba->utmrdl_base_addr ||
3935 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
3937 "Task Management Descriptor Memory allocation failed\n");
3942 /* Allocate memory for local reference block */
3943 hba->lrb = devm_kcalloc(hba->dev,
3944 hba->nutrs, sizeof(struct ufshcd_lrb),
3947 dev_err(hba->dev, "LRB Memory allocation failed\n");
 * ufshcd_host_memory_configure - configure local reference block with memory mapped information
3958 * @hba: per adapter instance
3960 * Configure Host memory space
3961 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3963 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3965 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3966 * into local reference block.
3968 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3970 struct utp_transfer_req_desc *utrdlp;
3971 dma_addr_t cmd_desc_dma_addr;
3972 dma_addr_t cmd_desc_element_addr;
3973 u16 response_offset;
3978 utrdlp = hba->utrdl_base_addr;
3981 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3983 offsetof(struct utp_transfer_cmd_desc, prd_table);
3985 cmd_desc_size = ufshcd_get_ucd_size(hba);
3986 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3988 for (i = 0; i < hba->nutrs; i++) {
3989 /* Configure UTRD with command descriptor base address */
3990 cmd_desc_element_addr =
3991 (cmd_desc_dma_addr + (cmd_desc_size * i));
3992 utrdlp[i].command_desc_base_addr =
3993 cpu_to_le64(cmd_desc_element_addr);
/* Response UPIU and PRDT offsets should be in doublewords */
3996 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3997 utrdlp[i].response_upiu_offset =
3998 cpu_to_le16(response_offset);
3999 utrdlp[i].prd_table_offset =
4000 cpu_to_le16(prdt_offset);
4001 utrdlp[i].response_upiu_length =
4002 cpu_to_le16(ALIGNED_UPIU_SIZE);
4004 utrdlp[i].response_upiu_offset =
4005 cpu_to_le16(response_offset >> 2);
4006 utrdlp[i].prd_table_offset =
4007 cpu_to_le16(prdt_offset >> 2);
4008 utrdlp[i].response_upiu_length =
4009 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
4012 ufshcd_init_lrb(hba, &hba->lrb[i], i);
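/*
 * Worked example for the doubleword conversion above, assuming
 * ALIGNED_UPIU_SIZE is 512: without the byte-granularity quirk the
 * response UPIU length is stored as 512 >> 2 = 128 doublewords.
 */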
4017 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
4018 * @hba: per adapter instance
 * The UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro layer
 * in order to initialize the UniPro link startup procedure.
 * Once the UniPro links are up, the device connected to the controller
4025 * Return: 0 on success, non-zero value on failure.
4027 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4029 struct uic_command uic_cmd = {0};
4032 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
4034 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4037 "dme-link-startup: error code %d\n", ret);
4041 * ufshcd_dme_reset - UIC command for DME_RESET
4042 * @hba: per adapter instance
 * The DME_RESET command is issued in order to reset the UniPro stack.
4045 * This function now deals with cold reset.
4047 * Return: 0 on success, non-zero value on failure.
4049 static int ufshcd_dme_reset(struct ufs_hba *hba)
4051 struct uic_command uic_cmd = {0};
4054 uic_cmd.command = UIC_CMD_DME_RESET;
4056 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4059 "dme-reset: error code %d\n", ret);
4064 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
4070 if (agreed_gear < UFS_HS_G4)
4071 adapt_val = PA_NO_ADAPT;
4073 ret = ufshcd_dme_set(hba,
4074 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
4078 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
4081 * ufshcd_dme_enable - UIC command for DME_ENABLE
4082 * @hba: per adapter instance
 * The DME_ENABLE command is issued in order to enable the UniPro stack.
4086 * Return: 0 on success, non-zero value on failure.
4088 static int ufshcd_dme_enable(struct ufs_hba *hba)
4090 struct uic_command uic_cmd = {0};
4093 uic_cmd.command = UIC_CMD_DME_ENABLE;
4095 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4098 "dme-enable: error code %d\n", ret);
4103 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4105 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
4106 unsigned long min_sleep_time_us;
4108 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4112 * last_dme_cmd_tstamp will be 0 only for 1st call to
4115 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4116 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4118 unsigned long delta =
4119 (unsigned long) ktime_to_us(
4120 ktime_sub(ktime_get(),
4121 hba->last_dme_cmd_tstamp));
4123 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4125 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4127 return; /* no more delay required */
4130 /* allow sleep for extra 50us if needed */
4131 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
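/*
 * Worked example for the delay above: if the previous DME command finished
 * 300 us ago, the remaining 700 us are slept (in the 700..750 us range)
 * before the next DME command is issued.
 */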
4135 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4136 * @hba: per adapter instance
4137 * @attr_sel: uic command argument1
4138 * @attr_set: attribute set type as uic command argument2
4139 * @mib_val: setting value as uic command argument3
4140 * @peer: indicate whether peer or local
4142 * Return: 0 on success, non-zero value on failure.
4144 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4145 u8 attr_set, u32 mib_val, u8 peer)
4147 struct uic_command uic_cmd = {0};
4148 static const char *const action[] = {
4152 const char *set = action[!!peer];
4154 int retries = UFS_UIC_COMMAND_RETRIES;
4156 uic_cmd.command = peer ?
4157 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4158 uic_cmd.argument1 = attr_sel;
4159 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4160 uic_cmd.argument3 = mib_val;
4163 /* for peer attributes we retry upon failure */
4164 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4166 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4167 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4168 } while (ret && peer && --retries);
4171 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4172 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4173 UFS_UIC_COMMAND_RETRIES - retries);
4177 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
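/*
 * Callers normally use the ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers
 * from ufshcd.h instead of calling ufshcd_dme_set_attr() directly, e.g.
 * (a sketch):
 *
 *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 */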
4180 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4181 * @hba: per adapter instance
4182 * @attr_sel: uic command argument1
4183 * @mib_val: the value of the attribute as returned by the UIC command
4184 * @peer: indicate whether peer or local
4186 * Return: 0 on success, non-zero value on failure.
4188 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4189 u32 *mib_val, u8 peer)
4191 struct uic_command uic_cmd = {0};
4192 static const char *const action[] = {
4196 const char *get = action[!!peer];
4198 int retries = UFS_UIC_COMMAND_RETRIES;
4199 struct ufs_pa_layer_attr orig_pwr_info;
4200 struct ufs_pa_layer_attr temp_pwr_info;
4201 bool pwr_mode_change = false;
4203 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4204 orig_pwr_info = hba->pwr_info;
4205 temp_pwr_info = orig_pwr_info;
4207 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4208 orig_pwr_info.pwr_rx == FAST_MODE) {
4209 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4210 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4211 pwr_mode_change = true;
4212 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4213 orig_pwr_info.pwr_rx == SLOW_MODE) {
4214 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4215 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4216 pwr_mode_change = true;
4218 if (pwr_mode_change) {
4219 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4225 uic_cmd.command = peer ?
4226 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4227 uic_cmd.argument1 = attr_sel;
4230 /* for peer attributes we retry upon failure */
4231 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4233 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4234 get, UIC_GET_ATTR_ID(attr_sel), ret);
4235 } while (ret && peer && --retries);
4238 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4239 get, UIC_GET_ATTR_ID(attr_sel),
4240 UFS_UIC_COMMAND_RETRIES - retries);
4242 if (mib_val && !ret)
4243 *mib_val = uic_cmd.argument3;
4245 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4247 ufshcd_change_power_mode(hba, &orig_pwr_info);
4251 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
 * state and wait for it to take effect.
4257 * @hba: per adapter instance
4258 * @cmd: UIC command to execute
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
 * DME_HIBERNATE_EXIT take some time to take effect on both the host and
 * device UniPro links, so their final completion is indicated by dedicated
 * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to the normal UIC command completion status (UCCS). This function
 * only returns after the relevant status bits indicate completion.
4267 * Return: 0 on success, non-zero value on failure.
4269 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4271 DECLARE_COMPLETION_ONSTACK(uic_async_done);
4272 unsigned long flags;
4275 bool reenable_intr = false;
4277 mutex_lock(&hba->uic_cmd_mutex);
4278 ufshcd_add_delay_before_dme_cmd(hba);
4280 spin_lock_irqsave(hba->host->host_lock, flags);
4281 if (ufshcd_is_link_broken(hba)) {
4285 hba->uic_async_done = &uic_async_done;
4286 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4287 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4289 * Make sure UIC command completion interrupt is disabled before
4290 * issuing UIC command.
4293 reenable_intr = true;
4295 spin_unlock_irqrestore(hba->host->host_lock, flags);
4296 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4299 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4300 cmd->command, cmd->argument3, ret);
4304 if (!wait_for_completion_timeout(hba->uic_async_done,
4305 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4307 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4308 cmd->command, cmd->argument3);
4310 if (!cmd->cmd_active) {
4311 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4321 status = ufshcd_get_upmcrs(hba);
4322 if (status != PWR_LOCAL) {
4324 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4325 cmd->command, status);
4326 ret = (status != PWR_OK) ? status : -1;
4330 ufshcd_print_host_state(hba);
4331 ufshcd_print_pwr_info(hba);
4332 ufshcd_print_evt_hist(hba);
4335 spin_lock_irqsave(hba->host->host_lock, flags);
4336 hba->active_uic_cmd = NULL;
4337 hba->uic_async_done = NULL;
4339 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4341 ufshcd_set_link_broken(hba);
4342 ufshcd_schedule_eh_work(hba);
4345 spin_unlock_irqrestore(hba->host->host_lock, flags);
4346 mutex_unlock(&hba->uic_cmd_mutex);
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4353 * using DME_SET primitives.
4354 * @hba: per adapter instance
 * @mode: power mode value
4357 * Return: 0 on success, non-zero value on failure.
4359 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4361 struct uic_command uic_cmd = {0};
4364 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4365 ret = ufshcd_dme_set(hba,
4366 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4368 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4374 uic_cmd.command = UIC_CMD_DME_SET;
4375 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4376 uic_cmd.argument3 = mode;
4378 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4379 ufshcd_release(hba);
4384 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
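/*
 * Illustrative note: PA_PWRMODE packs the RX power mode into the upper
 * nibble and the TX power mode into the lower nibble of @mode, which is
 * why ufshcd_change_power_mode() below composes its argument as:
 *
 *	ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | pwr_mode->pwr_tx);
 */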
4386 int ufshcd_link_recovery(struct ufs_hba *hba)
4389 unsigned long flags;
4391 spin_lock_irqsave(hba->host->host_lock, flags);
4392 hba->ufshcd_state = UFSHCD_STATE_RESET;
4393 ufshcd_set_eh_in_progress(hba);
4394 spin_unlock_irqrestore(hba->host->host_lock, flags);
4396 /* Reset the attached device */
4397 ufshcd_device_reset(hba);
4399 ret = ufshcd_host_reset_and_restore(hba);
4401 spin_lock_irqsave(hba->host->host_lock, flags);
4403 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4404 ufshcd_clear_eh_in_progress(hba);
4405 spin_unlock_irqrestore(hba->host->host_lock, flags);
4408 dev_err(hba->dev, "%s: link recovery failed, err %d",
4413 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4415 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4418 struct uic_command uic_cmd = {0};
4419 ktime_t start = ktime_get();
4421 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4423 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4424 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4425 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4426 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4429 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4432 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4437 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4439 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4441 struct uic_command uic_cmd = {0};
4443 ktime_t start = ktime_get();
4445 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4447 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4448 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4449 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4450 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4453 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4456 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4458 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4459 hba->ufs_stats.hibern8_exit_cnt++;
4464 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
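/*
 * Usage sketch (illustrative only): a manual hibern8 cycle pairs the two
 * helpers above. A failed enter or exit marks the link broken and kicks
 * the error handler from within ufshcd_uic_pwr_ctrl():
 *
 *	ret = ufshcd_uic_hibern8_enter(hba);
 *	if (!ret)
 *		ret = ufshcd_uic_hibern8_exit(hba);
 */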
4466 static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
4468 if (!ufshcd_is_auto_hibern8_supported(hba))
4471 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4474 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4476 const u32 cur_ahit = READ_ONCE(hba->ahit);
4478 if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
4481 WRITE_ONCE(hba->ahit, ahit);
4482 if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4483 ufshcd_rpm_get_sync(hba);
4485 ufshcd_configure_auto_hibern8(hba);
4486 ufshcd_release(hba);
4487 ufshcd_rpm_put_sync(hba);
4490 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
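/*
 * Example (hedged sketch): host drivers typically build the AHIT value
 * with FIELD_PREP() on the timer and scale fields from ufshci.h, e.g. a
 * 10 ms idle timeout (scale value 3 selects 1 ms granularity):
 *
 *	ufshcd_auto_hibern8_update(hba,
 *		FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
 *		FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
 */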
4493 * ufshcd_init_pwr_info - set the power-on-reset (POR)
4494 * values in the hba power info
4495 * @hba: per-adapter instance
4497 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4499 hba->pwr_info.gear_rx = UFS_PWM_G1;
4500 hba->pwr_info.gear_tx = UFS_PWM_G1;
4501 hba->pwr_info.lane_rx = UFS_LANE_1;
4502 hba->pwr_info.lane_tx = UFS_LANE_1;
4503 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4504 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4505 hba->pwr_info.hs_rate = 0;
4509 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4510 * @hba: per-adapter instance
4512 * Return: 0 upon success; < 0 upon failure.
4514 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4516 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4518 if (hba->max_pwr_info.is_valid)
4521 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4522 pwr_info->pwr_tx = FASTAUTO_MODE;
4523 pwr_info->pwr_rx = FASTAUTO_MODE;
4525 pwr_info->pwr_tx = FAST_MODE;
4526 pwr_info->pwr_rx = FAST_MODE;
4528 pwr_info->hs_rate = PA_HS_MODE_B;
4530 /* Get the connected lane count */
4531 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4532 &pwr_info->lane_rx);
4533 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4534 &pwr_info->lane_tx);
4536 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4537 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4545 * First, read the maximum HS gear.
4546 * A zero value means there is no HS gear capability,
4547 * in which case fall back to the maximum PWM gear.
4549 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4550 if (!pwr_info->gear_rx) {
4551 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4552 &pwr_info->gear_rx);
4553 if (!pwr_info->gear_rx) {
4554 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4555 __func__, pwr_info->gear_rx);
4558 pwr_info->pwr_rx = SLOW_MODE;
4561 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4562 &pwr_info->gear_tx);
4563 if (!pwr_info->gear_tx) {
4564 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4565 &pwr_info->gear_tx);
4566 if (!pwr_info->gear_tx) {
4567 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4568 __func__, pwr_info->gear_tx);
4571 pwr_info->pwr_tx = SLOW_MODE;
4574 hba->max_pwr_info.is_valid = true;
4578 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4579 struct ufs_pa_layer_attr *pwr_mode)
4583 /* if already configured to the requested pwr_mode */
4584 if (!hba->force_pmc &&
4585 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4586 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4587 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4588 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4589 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4590 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4591 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4592 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4597 * Configure the following attributes for the power mode change:
4598 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4599 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4602 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4603 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4605 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4606 pwr_mode->pwr_rx == FAST_MODE)
4607 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4609 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4611 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4612 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4614 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4615 pwr_mode->pwr_tx == FAST_MODE)
4616 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4618 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4620 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4621 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4622 pwr_mode->pwr_rx == FAST_MODE ||
4623 pwr_mode->pwr_tx == FAST_MODE)
4624 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4627 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4628 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4629 DL_FC0ProtectionTimeOutVal_Default);
4630 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4631 DL_TC0ReplayTimeOutVal_Default);
4632 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4633 DL_AFC0ReqTimeOutVal_Default);
4634 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4635 DL_FC1ProtectionTimeOutVal_Default);
4636 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4637 DL_TC1ReplayTimeOutVal_Default);
4638 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4639 DL_AFC1ReqTimeOutVal_Default);
4641 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4642 DL_FC0ProtectionTimeOutVal_Default);
4643 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4644 DL_TC0ReplayTimeOutVal_Default);
4645 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4646 DL_AFC0ReqTimeOutVal_Default);
4649 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4650 | pwr_mode->pwr_tx);
4654 "%s: power mode change failed %d\n", __func__, ret);
4656 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4659 memcpy(&hba->pwr_info, pwr_mode,
4660 sizeof(struct ufs_pa_layer_attr));
4667 * ufshcd_config_pwr_mode - configure a new power mode
4668 * @hba: per-adapter instance
4669 * @desired_pwr_mode: desired power configuration
4671 * Return: 0 upon success; < 0 upon failure.
4673 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4674 struct ufs_pa_layer_attr *desired_pwr_mode)
4676 struct ufs_pa_layer_attr final_params = { 0 };
4679 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4680 desired_pwr_mode, &final_params);
4683 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4685 ret = ufshcd_change_power_mode(hba, &final_params);
4689 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
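/*
 * Usage sketch (illustrative only): once ufshcd_get_max_pwr_mode() has
 * populated hba->max_pwr_info, switching to the fastest negotiated mode
 * is a single call:
 *
 *	ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */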
4692 * ufshcd_complete_dev_init() - checks device readiness
4693 * @hba: per-adapter instance
4695 * Set fDeviceInit flag and poll until device toggles it.
4697 * Return: 0 upon success; < 0 upon failure.
4699 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4702 bool flag_res = true;
4705 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4706 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4709 "%s: setting fDeviceInit flag failed with error %d\n",
4714 /* Poll fDeviceInit flag to be cleared */
4715 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4717 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4718 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4721 usleep_range(500, 1000);
4722 } while (ktime_before(ktime_get(), timeout));
4726 "%s: reading fDeviceInit flag failed with error %d\n",
4728 } else if (flag_res) {
4730 "%s: fDeviceInit was not cleared by the device\n",
4739 * ufshcd_make_hba_operational - Make UFS controller operational
4740 * @hba: per adapter instance
4742 * To bring the UFS host controller to an operational state:
4743 * 1. Enable required interrupts
4744 * 2. Configure interrupt aggregation
4745 * 3. Program the UTRL and UTMRL base addresses
4746 * 4. Configure the run-stop registers
4748 * Return: 0 on success, non-zero value on failure.
4750 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4755 /* Enable required interrupts */
4756 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4758 /* Configure interrupt aggregation */
4759 if (ufshcd_is_intr_aggr_allowed(hba))
4760 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4762 ufshcd_disable_intr_aggr(hba);
4764 /* Configure UTRL and UTMRL base address registers */
4765 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4766 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4767 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4768 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4769 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4770 REG_UTP_TASK_REQ_LIST_BASE_L);
4771 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4772 REG_UTP_TASK_REQ_LIST_BASE_H);
4775 * Make sure base address and interrupt setup are updated before
4776 * enabling the run/stop registers below.
4781 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4783 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4784 if (!(ufshcd_get_lists_status(reg))) {
4785 ufshcd_enable_run_stop_reg(hba);
4788 "Host controller not ready to process requests");
4794 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4797 * ufshcd_hba_stop - Send controller to reset state
4798 * @hba: per adapter instance
4800 void ufshcd_hba_stop(struct ufs_hba *hba)
4802 unsigned long flags;
4806 * Obtain the host lock to prevent the controller from being disabled
4807 * while the UFS interrupt handler is active on another CPU.
4809 spin_lock_irqsave(hba->host->host_lock, flags);
4810 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4811 spin_unlock_irqrestore(hba->host->host_lock, flags);
4813 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4814 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4817 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4819 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4822 * ufshcd_hba_execute_hce - initialize the controller
4823 * @hba: per adapter instance
4825 * The controller resets itself and controller firmware initialization
4826 * sequence kicks off. When controller is ready it will set
4827 * the Host Controller Enable bit to 1.
4829 * Return: 0 on success, non-zero value on failure.
4831 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4833 int retry_outer = 3;
4837 if (ufshcd_is_hba_active(hba))
4838 /* change controller state to "reset state" */
4839 ufshcd_hba_stop(hba);
4841 /* UniPro link is disabled at this point */
4842 ufshcd_set_link_off(hba);
4844 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4846 /* start controller initialization sequence */
4847 ufshcd_hba_start(hba);
4850 * To initialize a UFS host controller, the HCE bit must be set to 1.
4851 * During initialization the HCE bit value changes from 1->0->1.
4852 * When the host controller completes the initialization sequence
4853 * it sets the HCE bit back to 1. The same HCE bit is read back
4854 * to check whether the controller has completed initialization.
4855 * Without this delay, the HCE = 1 value written by the previous
4856 * instruction might be read back before the controller clears it.
4857 * This delay can be changed based on the controller.
4859 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4861 /* wait for the host controller to complete initialization */
4863 while (!ufshcd_is_hba_active(hba)) {
4868 "Controller enable failed\n");
4875 usleep_range(1000, 1100);
4878 /* enable UIC related interrupts */
4879 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4881 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4886 int ufshcd_hba_enable(struct ufs_hba *hba)
4890 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4891 ufshcd_set_link_off(hba);
4892 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4894 /* enable UIC related interrupts */
4895 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4896 ret = ufshcd_dme_reset(hba);
4898 dev_err(hba->dev, "DME_RESET failed\n");
4902 ret = ufshcd_dme_enable(hba);
4904 dev_err(hba->dev, "Enabling DME failed\n");
4908 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4910 ret = ufshcd_hba_execute_hce(hba);
4915 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
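/*
 * Note (hedged): host drivers whose HCE handshake is broken can set
 * UFSHCI_QUIRK_BROKEN_HCE in hba->quirks, e.g. from their init vop:
 *
 *	hba->quirks |= UFSHCI_QUIRK_BROKEN_HCE;
 *
 * which makes ufshcd_hba_enable() use the DME_RESET/DME_ENABLE sequence
 * above instead of toggling the HCE bit.
 */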
4917 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4919 int tx_lanes = 0, i, err = 0;
4922 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4925 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4927 for (i = 0; i < tx_lanes; i++) {
4929 err = ufshcd_dme_set(hba,
4930 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4931 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4934 err = ufshcd_dme_peer_set(hba,
4935 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4936 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4939 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4940 __func__, peer, i, err);
4948 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4950 return ufshcd_disable_tx_lcc(hba, true);
4953 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4955 struct ufs_event_hist *e;
4957 if (id >= UFS_EVT_CNT)
4960 e = &hba->ufs_stats.event[id];
4961 e->val[e->pos] = val;
4962 e->tstamp[e->pos] = local_clock();
4964 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4966 ufshcd_vops_event_notify(hba, id, &val);
4968 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
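/*
 * Usage sketch (illustrative only): the event history is a small per-id
 * ring buffer; callers just record the value of interest, e.g.:
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
 *
 * Older entries are overwritten once UFS_EVENT_HIST_LENGTH values have
 * been stored.
 */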
4971 * ufshcd_link_startup - Initialize unipro link startup
4972 * @hba: per adapter instance
4974 * Return: 0 for success, non-zero in case of failure.
4976 static int ufshcd_link_startup(struct ufs_hba *hba)
4979 int retries = DME_LINKSTARTUP_RETRIES;
4980 bool link_startup_again = false;
4983 * If the UFS device isn't active then we will have to issue link startup
4984 * 2 times to make sure the device state moves to active.
4986 if (!ufshcd_is_ufs_dev_active(hba))
4987 link_startup_again = true;
4991 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4993 ret = ufshcd_dme_link_startup(hba);
4995 /* check if device is detected by inter-connect layer */
4996 if (!ret && !ufshcd_is_device_present(hba)) {
4997 ufshcd_update_evt_hist(hba,
4998 UFS_EVT_LINK_STARTUP_FAIL,
5000 dev_err(hba->dev, "%s: Device not present\n", __func__);
5006 * DME link lost indication is only received when link is up,
5007 * but we can't be sure if the link is up until link startup
5008 * succeeds. So reset the local Uni-Pro and try again.
5010 if (ret && retries && ufshcd_hba_enable(hba)) {
5011 ufshcd_update_evt_hist(hba,
5012 UFS_EVT_LINK_STARTUP_FAIL,
5016 } while (ret && retries--);
5019 /* failed to get the link up... retire */
5020 ufshcd_update_evt_hist(hba,
5021 UFS_EVT_LINK_STARTUP_FAIL,
5026 if (link_startup_again) {
5027 link_startup_again = false;
5028 retries = DME_LINKSTARTUP_RETRIES;
5032 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5033 ufshcd_init_pwr_info(hba);
5034 ufshcd_print_pwr_info(hba);
5036 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5037 ret = ufshcd_disable_device_tx_lcc(hba);
5042 /* Include any host controller configuration via UIC commands */
5043 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5047 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
5048 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5049 ret = ufshcd_make_hba_operational(hba);
5052 dev_err(hba->dev, "link startup failed %d\n", ret);
5053 ufshcd_print_host_state(hba);
5054 ufshcd_print_pwr_info(hba);
5055 ufshcd_print_evt_hist(hba);
5061 * ufshcd_verify_dev_init() - Verify device initialization
5062 * @hba: per-adapter instance
5064 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5065 * device Transport Protocol (UTP) layer is ready after a reset.
5066 * If the UTP layer at the device side is not initialized, it may
5067 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5068 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5070 * Return: 0 upon success; < 0 upon failure.
5072 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5078 mutex_lock(&hba->dev_cmd.lock);
5079 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5080 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5081 hba->nop_out_timeout);
5083 if (!err || err == -ETIMEDOUT)
5086 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5088 mutex_unlock(&hba->dev_cmd.lock);
5089 ufshcd_release(hba);
5092 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5097 * ufshcd_setup_links - associate link b/w device wlun and other luns
5098 * @sdev: pointer to SCSI device
5099 * @hba: pointer to ufs hba
5101 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
5103 struct device_link *link;
5106 * Device wlun is the supplier and the rest of the LUNs are consumers.
5107 * This ensures that the device wlun suspends after all other LUNs.
5109 if (hba->ufs_device_wlun) {
5110 link = device_link_add(&sdev->sdev_gendev,
5111 &hba->ufs_device_wlun->sdev_gendev,
5112 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
5114 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
5115 dev_name(&hba->ufs_device_wlun->sdev_gendev));
5119 /* Ignore REPORT_LUN wlun probing */
5120 if (hba->luns_avail == 1) {
5121 ufshcd_rpm_put(hba);
5126 * Device wlun is probed. The assumption is that WLUNs are
5127 * scanned before other LUNs.
5134 * ufshcd_lu_init - Initialize the relevant parameters of the LU
5135 * @hba: per-adapter instance
5136 * @sdev: pointer to SCSI device
5138 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5140 int len = QUERY_DESC_MAX_SIZE;
5141 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5142 u8 lun_qdepth = hba->nutrs;
5146 desc_buf = kzalloc(len, GFP_KERNEL);
5150 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5152 if (ret == -EOPNOTSUPP)
5153 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5159 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5161 * In a per-LU queueing architecture, bLUQueueDepth is never 0, so we use
5162 * the smaller of UFSHCI CAP.NUTRS and the UFS LU bLUQueueDepth
5164 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5167 * According to the UFS device specification, write protection mode is only
5168 * supported by normal LUs, not by WLUNs.
5170 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5171 !hba->dev_info.is_lu_power_on_wp &&
5172 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5173 hba->dev_info.is_lu_power_on_wp = true;
5175 /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5176 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5177 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5178 hba->dev_info.b_advanced_rpmb_en = true;
5184 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
5185 * bLUQueueDepth == 0, the queue depth is set to the maximum value that the host can queue.
5187 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5188 scsi_change_queue_depth(sdev, lun_qdepth);
5192 * ufshcd_slave_alloc - handle initial SCSI device configurations
5193 * @sdev: pointer to SCSI device
5197 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5199 struct ufs_hba *hba;
5201 hba = shost_priv(sdev->host);
5203 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5204 sdev->use_10_for_ms = 1;
5206 /* DBD field should be set to 1 in mode sense(10) */
5207 sdev->set_dbd_for_ms = 1;
5209 /* allow SCSI layer to restart the device in case of errors */
5210 sdev->allow_restart = 1;
5212 /* REPORT SUPPORTED OPERATION CODES is not supported */
5213 sdev->no_report_opcodes = 1;
5215 /* WRITE_SAME command is not supported */
5216 sdev->no_write_same = 1;
5218 ufshcd_lu_init(hba, sdev);
5220 ufshcd_setup_links(hba, sdev);
5226 * ufshcd_change_queue_depth - change queue depth
5227 * @sdev: pointer to SCSI device
5228 * @depth: required depth to set
5230 * Change queue depth and make sure the max. limits are not crossed.
5232 * Return: new queue depth.
5234 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5236 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5240 * ufshcd_slave_configure - adjust SCSI device configurations
5241 * @sdev: pointer to SCSI device
5243 * Return: 0 (success).
5245 static int ufshcd_slave_configure(struct scsi_device *sdev)
5247 struct ufs_hba *hba = shost_priv(sdev->host);
5248 struct request_queue *q = sdev->request_queue;
5250 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5253 * Block runtime PM until all consumers have been added.
5254 * Refer to ufshcd_setup_links().
5256 if (is_device_wlun(sdev))
5257 pm_runtime_get_noresume(&sdev->sdev_gendev);
5258 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5259 sdev->rpm_autosuspend = 1;
5261 * Do not print messages during runtime PM to avoid never-ending cycles
5262 * of messages written back to storage by user space causing runtime
5263 * resume, causing more messages and so on.
5265 sdev->silence_suspend = 1;
5267 if (hba->vops && hba->vops->config_scsi_dev)
5268 hba->vops->config_scsi_dev(sdev);
5270 ufshcd_crypto_register(hba, q);
5276 * ufshcd_slave_destroy - remove SCSI device configurations
5277 * @sdev: pointer to SCSI device
5279 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5281 struct ufs_hba *hba;
5282 unsigned long flags;
5284 hba = shost_priv(sdev->host);
5286 /* Drop the reference as it won't be needed anymore */
5287 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5288 spin_lock_irqsave(hba->host->host_lock, flags);
5289 hba->ufs_device_wlun = NULL;
5290 spin_unlock_irqrestore(hba->host->host_lock, flags);
5291 } else if (hba->ufs_device_wlun) {
5292 struct device *supplier = NULL;
5294 /* Ensure UFS Device WLUN exists and does not disappear */
5295 spin_lock_irqsave(hba->host->host_lock, flags);
5296 if (hba->ufs_device_wlun) {
5297 supplier = &hba->ufs_device_wlun->sdev_gendev;
5298 get_device(supplier);
5300 spin_unlock_irqrestore(hba->host->host_lock, flags);
5304 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5305 * device will not have been registered but can still
5306 * have a device link holding a reference to the device.
5308 device_link_remove(&sdev->sdev_gendev, supplier);
5309 put_device(supplier);
5315 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5316 * @lrbp: pointer to local reference block of completed command
5317 * @scsi_status: SCSI command status
5319 * Return: value based on the SCSI command status.
5322 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5326 switch (scsi_status) {
5327 case SAM_STAT_CHECK_CONDITION:
5328 ufshcd_copy_sense_data(lrbp);
5331 result |= DID_OK << 16 | scsi_status;
5333 case SAM_STAT_TASK_SET_FULL:
5335 case SAM_STAT_TASK_ABORTED:
5336 ufshcd_copy_sense_data(lrbp);
5337 result |= scsi_status;
5340 result |= DID_ERROR << 16;
5342 } /* end of switch */
5348 * ufshcd_transfer_rsp_status - Get overall status of the response
5349 * @hba: per adapter instance
5350 * @lrbp: pointer to local reference block of completed command
5351 * @cqe: pointer to the completion queue entry
5353 * Return: result of the command to notify SCSI midlayer.
5356 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5357 struct cq_entry *cqe)
5365 upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
5366 resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
5368 * Test !overflow instead of underflow to support UFS devices that do
5369 * not set either flag.
5371 if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
5372 scsi_set_resid(lrbp->cmd, resid);
5374 /* overall command status of utrd */
5375 ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5377 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5378 if (lrbp->ucd_rsp_ptr->header.response ||
5379 lrbp->ucd_rsp_ptr->header.status)
5385 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5386 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
5387 case UPIU_TRANSACTION_RESPONSE:
5389 * get the result based on SCSI status response
5390 * to notify the SCSI midlayer of the command status
5392 scsi_status = lrbp->ucd_rsp_ptr->header.status;
5393 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5396 * Currently we only support BKOPs exception
5397 * events, hence we can ignore BKOPs exception events
5398 * during power management callbacks. A BKOPs exception
5399 * event is not expected to be raised in the runtime suspend
5400 * callback as it allows the urgent bkops.
5401 * During system suspend, we forcefully
5402 * disable bkops anyway, and if urgent bkops is needed
5403 * it will be enabled on system resume. A long term
5404 * solution could be to abort the system suspend if the
5405 * UFS device needs urgent BKOPs.
5407 if (!hba->pm_op_in_progress &&
5408 !ufshcd_eh_in_progress(hba) &&
5409 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5410 /* Flushed in suspend */
5411 schedule_work(&hba->eeh_work);
5413 case UPIU_TRANSACTION_REJECT_UPIU:
5414 /* TODO: handle Reject UPIU Response */
5415 result = DID_ERROR << 16;
5417 "Reject UPIU not fully implemented\n");
5421 "Unexpected request response code = %x\n",
5423 result = DID_ERROR << 16;
5428 result |= DID_ABORT << 16;
5430 case OCS_INVALID_COMMAND_STATUS:
5431 result |= DID_REQUEUE << 16;
5433 case OCS_INVALID_CMD_TABLE_ATTR:
5434 case OCS_INVALID_PRDT_ATTR:
5435 case OCS_MISMATCH_DATA_BUF_SIZE:
5436 case OCS_MISMATCH_RESP_UPIU_SIZE:
5437 case OCS_PEER_COMM_FAILURE:
5438 case OCS_FATAL_ERROR:
5439 case OCS_DEVICE_FATAL_ERROR:
5440 case OCS_INVALID_CRYPTO_CONFIG:
5441 case OCS_GENERAL_CRYPTO_ERROR:
5443 result |= DID_ERROR << 16;
5445 "OCS error from controller = %x for tag %d\n",
5446 ocs, lrbp->task_tag);
5447 ufshcd_print_evt_hist(hba);
5448 ufshcd_print_host_state(hba);
5450 } /* end of switch */
5452 if ((host_byte(result) != DID_OK) &&
5453 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5454 ufshcd_print_tr(hba, lrbp->task_tag, true);
5458 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5461 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5462 !ufshcd_is_auto_hibern8_enabled(hba))
5465 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5468 if (hba->active_uic_cmd &&
5469 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5470 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5477 * ufshcd_uic_cmd_compl - handle completion of uic command
5478 * @hba: per adapter instance
5479 * @intr_status: interrupt status generated by the controller
5482 * IRQ_HANDLED - If interrupt is valid
5483 * IRQ_NONE - If invalid interrupt
5485 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5487 irqreturn_t retval = IRQ_NONE;
5489 spin_lock(hba->host->host_lock);
5490 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5491 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5493 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5494 hba->active_uic_cmd->argument2 |=
5495 ufshcd_get_uic_cmd_result(hba);
5496 hba->active_uic_cmd->argument3 =
5497 ufshcd_get_dme_attr_val(hba);
5498 if (!hba->uic_async_done)
5499 hba->active_uic_cmd->cmd_active = 0;
5500 complete(&hba->active_uic_cmd->done);
5501 retval = IRQ_HANDLED;
5504 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5505 hba->active_uic_cmd->cmd_active = 0;
5506 complete(hba->uic_async_done);
5507 retval = IRQ_HANDLED;
5510 if (retval == IRQ_HANDLED)
5511 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5513 spin_unlock(hba->host->host_lock);
5517 /* Release the resources allocated for processing a SCSI command. */
5518 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5519 struct ufshcd_lrb *lrbp)
5521 struct scsi_cmnd *cmd = lrbp->cmd;
5523 scsi_dma_unmap(cmd);
5524 ufshcd_release(hba);
5525 ufshcd_clk_scaling_update_busy(hba);
5529 * ufshcd_compl_one_cqe - handle a completion queue entry
5530 * @hba: per adapter instance
5531 * @task_tag: the task tag of the request to be completed
5532 * @cqe: pointer to the completion queue entry
5534 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5535 struct cq_entry *cqe)
5537 struct ufshcd_lrb *lrbp;
5538 struct scsi_cmnd *cmd;
5541 lrbp = &hba->lrb[task_tag];
5542 lrbp->compl_time_stamp = ktime_get();
5545 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5546 ufshcd_update_monitor(hba, lrbp);
5547 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5548 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5549 ufshcd_release_scsi_cmd(hba, lrbp);
5550 /* Do not touch lrbp after scsi done */
5552 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5553 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5554 if (hba->dev_cmd.complete) {
5556 ocs = le32_to_cpu(cqe->status) & MASK_OCS;
5557 lrbp->utr_descriptor_ptr->header.ocs = ocs;
5559 complete(hba->dev_cmd.complete);
5565 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5566 * @hba: per adapter instance
5567 * @completed_reqs: bitmask that indicates which requests to complete
5569 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5570 unsigned long completed_reqs)
5574 for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5575 ufshcd_compl_one_cqe(hba, tag, NULL);
5578 /* Any value that is not an existing queue number is fine for this constant. */
5580 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5583 static void ufshcd_clear_polled(struct ufs_hba *hba,
5584 unsigned long *completed_reqs)
5588 for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5589 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5593 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5594 __clear_bit(tag, completed_reqs);
5599 * Return: > 0 if one or more commands have been completed or 0 if no
5600 * requests have been completed.
5602 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5604 struct ufs_hba *hba = shost_priv(shost);
5605 unsigned long completed_reqs, flags;
5607 struct ufs_hw_queue *hwq;
5609 if (is_mcq_enabled(hba)) {
5610 hwq = &hba->uhq[queue_num];
5612 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5615 spin_lock_irqsave(&hba->outstanding_lock, flags);
5616 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5617 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5618 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5619 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5620 hba->outstanding_reqs);
5621 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5622 /* Do not complete polled requests from interrupt context. */
5623 ufshcd_clear_polled(hba, &completed_reqs);
5625 hba->outstanding_reqs &= ~completed_reqs;
5626 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5629 __ufshcd_transfer_req_compl(hba, completed_reqs);
5631 return completed_reqs != 0;
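	/*
	 * Worked example (illustrative): with outstanding_reqs == 0b1011 and
	 * a doorbell reading of 0b0001, ~tr_doorbell & outstanding_reqs ==
	 * 0b1010, i.e. tags 1 and 3 have completed and are handed to
	 * __ufshcd_transfer_req_compl() above.
	 */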
5635 * ufshcd_mcq_compl_pending_transfer - MCQ mode function, invoked from the
5636 * error handler context or from ufshcd_host_reset_and_restore() to complete
5637 * pending transfers and free the resources associated with them.
5640 * @hba: per adapter instance
5641 * @force_compl: This flag is set to true when invoked
5642 * from ufshcd_host_reset_and_restore() in which case it requires special
5643 * handling because the host controller has been reset by ufshcd_hba_stop().
5645 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
5648 struct ufs_hw_queue *hwq;
5649 struct ufshcd_lrb *lrbp;
5650 struct scsi_cmnd *cmd;
5651 unsigned long flags;
5654 for (tag = 0; tag < hba->nutrs; tag++) {
5655 lrbp = &hba->lrb[tag];
5657 if (!ufshcd_cmd_inflight(cmd) ||
5658 test_bit(SCMD_STATE_COMPLETE, &cmd->state))
5661 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
5664 ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
5666 * For commands whose CQEs are not present
5667 * in the CQ, complete them explicitly.
5669 spin_lock_irqsave(&hwq->cq_lock, flags);
5670 if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
5671 set_host_byte(cmd, DID_REQUEUE);
5672 ufshcd_release_scsi_cmd(hba, lrbp);
5675 spin_unlock_irqrestore(&hwq->cq_lock, flags);
5677 ufshcd_mcq_poll_cqe_lock(hba, hwq);
5683 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5684 * @hba: per adapter instance
5687 * IRQ_HANDLED - If interrupt is valid
5688 * IRQ_NONE - If invalid interrupt
5690 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5692 /* Resetting the interrupt aggregation counters first and reading the
5693 * doorbell afterward allows us to handle all the completed requests.
5694 * To prevent starvation of other interrupts, the doorbell is read only
5695 * once after the reset. The downside of this approach is the possibility
5696 * of a false interrupt if the device completes another request after
5697 * resetting aggregation and before reading the doorbell.
5699 if (ufshcd_is_intr_aggr_allowed(hba) &&
5700 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5701 ufshcd_reset_intr_aggr(hba);
5703 if (ufs_fail_completion(hba))
5707 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5708 * do not want polling to trigger spurious interrupt complaints.
5710 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5715 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5717 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5718 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5722 int ufshcd_write_ee_control(struct ufs_hba *hba)
5726 mutex_lock(&hba->ee_ctrl_mutex);
5727 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5728 mutex_unlock(&hba->ee_ctrl_mutex);
5730 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5735 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5736 const u16 *other_mask, u16 set, u16 clr)
5738 u16 new_mask, ee_ctrl_mask;
5741 mutex_lock(&hba->ee_ctrl_mutex);
5742 new_mask = (*mask & ~clr) | set;
5743 ee_ctrl_mask = new_mask | *other_mask;
5744 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5745 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5746 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5748 hba->ee_ctrl_mask = ee_ctrl_mask;
5751 mutex_unlock(&hba->ee_ctrl_mutex);
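	/*
	 * Worked example (illustrative): with *mask == 0x05, set == 0x02 and
	 * clr == 0x01, new_mask becomes (0x05 & ~0x01) | 0x02 == 0x06; the
	 * attribute is only rewritten when new_mask | *other_mask differs
	 * from the cached hba->ee_ctrl_mask.
	 */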
5756 * ufshcd_disable_ee - disable exception event
5757 * @hba: per-adapter instance
5758 * @mask: exception event to disable
5760 * Disables exception event in the device so that the EVENT_ALERT
5763 * Return: zero on success, non-zero error value on failure.
5765 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5767 return ufshcd_update_ee_drv_mask(hba, 0, mask);
5771 * ufshcd_enable_ee - enable exception event
5772 * @hba: per-adapter instance
5773 * @mask: exception event to enable
5775 * Enable corresponding exception event in the device to allow
5776 * device to alert host in critical scenarios.
5778 * Return: zero on success, non-zero error value on failure.
5780 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5782 return ufshcd_update_ee_drv_mask(hba, mask, 0);
5786 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5787 * @hba: per-adapter instance
5789 * Allow the device to manage background operations on its own. Enabling
5790 * this might lead to inconsistent latencies during normal data transfers
5791 * as the device may schedule background operations as it sees fit.
5794 * Return: zero on success, non-zero on failure.
5796 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5800 if (hba->auto_bkops_enabled)
5803 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5804 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5806 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5811 hba->auto_bkops_enabled = true;
5812 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5814 /* No need of URGENT_BKOPS exception from the device */
5815 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5817 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5824 * ufshcd_disable_auto_bkops - block device in doing background operations
5825 * @hba: per-adapter instance
5827 * Disabling background operations improves command response latency but
5828 * has the drawback of the device moving into a critical state where it is
5829 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5830 * host is idle so that BKOPS are managed effectively without any negative
5833 * Return: zero on success, non-zero on failure.
5835 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5839 if (!hba->auto_bkops_enabled)
5843 * If host assisted BKOPs is to be enabled, make sure
5844 * urgent bkops exception is allowed.
5846 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5848 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5853 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5854 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5856 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5858 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5862 hba->auto_bkops_enabled = false;
5863 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5864 hba->is_urgent_bkops_lvl_checked = false;
5870 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5871 * @hba: per adapter instance
5873 * After a device reset, the device may toggle the BKOPS_EN flag
5874 * to its default value. The s/w tracking variables should be updated
5875 * as well. This function changes the auto-bkops state based on
5876 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5878 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5880 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5881 hba->auto_bkops_enabled = false;
5882 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5883 ufshcd_enable_auto_bkops(hba);
5885 hba->auto_bkops_enabled = true;
5886 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5887 ufshcd_disable_auto_bkops(hba);
5889 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5890 hba->is_urgent_bkops_lvl_checked = false;
5893 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5895 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5896 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5900 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5901 * @hba: per-adapter instance
5902 * @status: bkops_status value
5904 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5905 * flag in the device to permit background operations if the device
5906 * bkops_status is greater than or equal to the "status" argument passed to
5907 * this function; disable the flag otherwise.
5909 * Return: 0 for success, non-zero in case of failure.
5911 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5912 * to know whether auto bkops is enabled or disabled after this function
5913 * returns control to it.
5915 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5916 enum bkops_status status)
5919 u32 curr_status = 0;
5921 err = ufshcd_get_bkops_status(hba, &curr_status);
5923 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5926 } else if (curr_status > BKOPS_STATUS_MAX) {
5927 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5928 __func__, curr_status);
5933 if (curr_status >= status)
5934 err = ufshcd_enable_auto_bkops(hba);
5936 err = ufshcd_disable_auto_bkops(hba);
5942 * ufshcd_urgent_bkops - handle urgent bkops exception event
5943 * @hba: per-adapter instance
5945 * Enable fBackgroundOpsEn flag in the device to permit background
5948 * This function returns 0 if BKOPs is enabled, 1 if bkops is not enabled,
5949 * and a negative error value for any other failure.
5951 * Return: 0 upon success; < 0 upon failure.
5953 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5955 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
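	/*
	 * Worked example (illustrative): if hba->urgent_bkops_lvl is
	 * BKOPS_STATUS_PERF_IMPACT and the device reports
	 * BKOPS_STATUS_CRITICAL, curr_status >= status holds and
	 * ufshcd_bkops_ctrl() enables auto-bkops; a reported status of
	 * BKOPS_STATUS_NO_OP would disable it instead.
	 */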
5958 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5960 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5961 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5964 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5967 u32 curr_status = 0;
5969 if (hba->is_urgent_bkops_lvl_checked)
5970 goto enable_auto_bkops;
5972 err = ufshcd_get_bkops_status(hba, &curr_status);
5974 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5980 * We are seeing that some devices raise the urgent bkops
5981 * exception event even when the BKOPS status doesn't indicate performance
5982 * impacted or critical. Handle these devices by determining their urgent
5983 * bkops status at runtime.
5985 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5986 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5987 __func__, curr_status);
5988 /* update the current status as the urgent bkops level */
5989 hba->urgent_bkops_lvl = curr_status;
5990 hba->is_urgent_bkops_lvl_checked = true;
5994 err = ufshcd_enable_auto_bkops(hba);
5997 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
6001 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
6005 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6006 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
6009 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
6011 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
6014 * A placeholder for the platform vendors to add whatever additional
6019 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
6022 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
6023 UPIU_QUERY_OPCODE_CLEAR_FLAG;
6025 index = ufshcd_wb_get_query_index(hba);
6026 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
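/*
 * Usage sketch (illustrative only): all WriteBooster flags are toggled
 * through this helper, which picks SET_FLAG or CLEAR_FLAG based on
 * @set, e.g.:
 *
 *	ret = __ufshcd_wb_toggle(hba, true, QUERY_FLAG_IDN_WB_EN);
 */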
6029 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
6033 if (!ufshcd_is_wb_allowed(hba) ||
6034 hba->dev_info.wb_enabled == enable)
6037 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
6039 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
6040 __func__, enable ? "enabling" : "disabling", ret);
6044 hba->dev_info.wb_enabled = enable;
6045 dev_dbg(hba->dev, "%s: Write Booster %s\n",
6046 __func__, enable ? "enabled" : "disabled");
6051 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
6056 ret = __ufshcd_wb_toggle(hba, enable,
6057 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
6059 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
6060 __func__, enable ? "enabling" : "disabling", ret);
6063 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
6064 __func__, enable ? "enabled" : "disabled");
6067 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
6071 if (!ufshcd_is_wb_allowed(hba) ||
6072 hba->dev_info.wb_buf_flush_enabled == enable)
6075 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
6077 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
6078 __func__, enable ? "enabling" : "disabling", ret);
6082 hba->dev_info.wb_buf_flush_enabled = enable;
6083 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
6084 __func__, enable ? "enabled" : "disabled");
6089 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
6096 index = ufshcd_wb_get_query_index(hba);
6097 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6098 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
6099 index, 0, &cur_buf);
6101 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
6107 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
6111 /* Keep flushing while the available buffer is below the threshold */
6112 return avail_buf < hba->vps->wb_flush_threshold;
6115 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
6117 if (ufshcd_is_wb_buf_flush_allowed(hba))
6118 ufshcd_wb_toggle_buf_flush(hba, false);
6120 ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
6121 ufshcd_wb_toggle(hba, false);
6122 hba->caps &= ~UFSHCD_CAP_WB_EN;
6124 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
6127 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
6133 index = ufshcd_wb_get_query_index(hba);
6134 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6135 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
6136 index, 0, &lifetime);
6139 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
6144 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
6145 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
6146 __func__, lifetime);
6150 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
6151 __func__, lifetime);
6156 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6162 if (!ufshcd_is_wb_allowed(hba))
6165 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6166 ufshcd_wb_force_disable(hba);
6171 * The UFS device needs the VCC to be ON to flush.
6172 * With user-space reduction enabled, it's enough to enable flushing
6173 * by checking only the available buffer. The threshold
6174 * defined here is > 90% full.
6175 * With user-space preservation enabled, the current buffer
6176 * should be checked too because the WB buffer size can shrink
6177 * as the disk fills up. This info is provided by the current
6178 * buffer size (dCurrentWriteBoosterBufferSize). There's no point in
6179 * keeping VCC on when the current buffer is empty.
6181 index = ufshcd_wb_get_query_index(hba);
6182 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6183 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6184 index, 0, &avail_buf);
6186 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6191 if (!hba->dev_info.b_presrv_uspc_en)
6192 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6194 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
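	/*
	 * Illustrative note: with user-space reduction (no preservation),
	 * the check above requests a flush once no more than 10% of the
	 * WriteBooster buffer remains available, i.e.:
	 *
	 *	avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10)
	 */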
6197 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6199 struct ufs_hba *hba = container_of(to_delayed_work(work),
6201 rpm_dev_flush_recheck_work);
6203 * To prevent unnecessary VCC power drain after device finishes
6204 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
6205 * after a certain delay to recheck the threshold by next runtime
6208 ufshcd_rpm_get_sync(hba);
6209 ufshcd_rpm_put_sync(hba);
6213 * ufshcd_exception_event_handler - handle exceptions raised by device
6214 * @work: pointer to work data
6216 * Read bExceptionEventStatus attribute from the device and handle the
6217 * exception event accordingly.
6219 static void ufshcd_exception_event_handler(struct work_struct *work)
6221 struct ufs_hba *hba;
6224 hba = container_of(work, struct ufs_hba, eeh_work);
6226 ufshcd_scsi_block_requests(hba);
6227 err = ufshcd_get_ee_status(hba, &status);
6229 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6234 trace_ufshcd_exception_event(dev_name(hba->dev), status);
6236 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6237 ufshcd_bkops_exception_event_handler(hba);
6239 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6240 ufshcd_temp_exception_event_handler(hba, status);
6242 ufs_debugfs_exception_event(hba, status);
6244 ufshcd_scsi_unblock_requests(hba);
6247 /* Complete requests that have door-bell cleared */
6248 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
6250 if (is_mcq_enabled(hba))
6251 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
6253 ufshcd_transfer_req_compl(hba);
6255 ufshcd_tmc_handler(hba);
6259 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6260 * to recover from the DL NAC errors.
6261 * @hba: per-adapter instance
6263 * Return: true if error handling is required, false otherwise.
6265 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6267 unsigned long flags;
6268 bool err_handling = true;
6270 spin_lock_irqsave(hba->host->host_lock, flags);
6272 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
6273 * device fatal error and/or DL NAC & REPLAY timeout errors.
6275 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6278 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6279 ((hba->saved_err & UIC_ERROR) &&
6280 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6283 if ((hba->saved_err & UIC_ERROR) &&
6284 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6287 * wait for 50ms to see if we can get any other errors or not.
6289 spin_unlock_irqrestore(hba->host->host_lock, flags);
6291 spin_lock_irqsave(hba->host->host_lock, flags);
6294 * now check if we have got any other severe errors other than
6297 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6298 ((hba->saved_err & UIC_ERROR) &&
6299 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6303 * As DL NAC is the only error received so far, send out NOP
6304 * command to confirm if link is still active or not.
6305 * - If we don't get any response then do error recovery.
6306 * - If we get response then clear the DL NAC error bit.
6309 spin_unlock_irqrestore(hba->host->host_lock, flags);
6310 err = ufshcd_verify_dev_init(hba);
6311 spin_lock_irqsave(hba->host->host_lock, flags);
6316 /* Link seems to be alive hence ignore the DL NAC errors */
6317 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6318 hba->saved_err &= ~UIC_ERROR;
6319 /* clear NAC error */
6320 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6321 if (!hba->saved_uic_err)
6322 err_handling = false;
6325 spin_unlock_irqrestore(hba->host->host_lock, flags);
6326 return err_handling;
6329 /* host lock must be held before calling this func */
6330 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6332 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6333 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6336 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6338 lockdep_assert_held(hba->host->host_lock);
6340 /* handle fatal errors only when link is not in error state */
6341 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6342 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6343 ufshcd_is_saved_err_fatal(hba))
6344 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6346 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6347 queue_work(hba->eh_wq, &hba->eh_work);
6351 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6353 spin_lock_irq(hba->host->host_lock);
6354 hba->force_reset = true;
6355 ufshcd_schedule_eh_work(hba);
6356 spin_unlock_irq(hba->host->host_lock);
6359 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6361 mutex_lock(&hba->wb_mutex);
6362 down_write(&hba->clk_scaling_lock);
6363 hba->clk_scaling.is_allowed = allow;
6364 up_write(&hba->clk_scaling_lock);
6365 mutex_unlock(&hba->wb_mutex);
6368 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6371 if (hba->clk_scaling.is_enabled)
6372 ufshcd_suspend_clkscaling(hba);
6373 ufshcd_clk_scaling_allow(hba, false);
6375 ufshcd_clk_scaling_allow(hba, true);
6376 if (hba->clk_scaling.is_enabled)
6377 ufshcd_resume_clkscaling(hba);
6381 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6383 ufshcd_rpm_get_sync(hba);
6384 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6385 hba->is_sys_suspended) {
6386 enum ufs_pm_op pm_op;
6389 * Don't assume anything about resume: if
6390 * resume fails, the IRQ and clocks can be OFF, and the power
6391 * supplies can be OFF or in LPM.
6393 ufshcd_setup_hba_vreg(hba, true);
6394 ufshcd_enable_irq(hba);
6395 ufshcd_setup_vreg(hba, true);
6396 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6397 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6399 if (!ufshcd_is_clkgating_allowed(hba))
6400 ufshcd_setup_clocks(hba, true);
6401 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6402 ufshcd_vops_resume(hba, pm_op);
6405 if (ufshcd_is_clkscaling_supported(hba) &&
6406 hba->clk_scaling.is_enabled)
6407 ufshcd_suspend_clkscaling(hba);
6408 ufshcd_clk_scaling_allow(hba, false);
6410 ufshcd_scsi_block_requests(hba);
6411 /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
6412 blk_mq_wait_quiesce_done(&hba->host->tag_set);
6413 cancel_work_sync(&hba->eeh_work);
6416 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6418 ufshcd_scsi_unblock_requests(hba);
6419 ufshcd_release(hba);
6420 if (ufshcd_is_clkscaling_supported(hba))
6421 ufshcd_clk_scaling_suspend(hba, false);
6422 ufshcd_rpm_put(hba);
6425 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6427 return (!hba->is_powered || hba->shutting_down ||
6428 !hba->ufs_device_wlun ||
6429 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6430 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6431 ufshcd_is_link_broken(hba))));
6435 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6437 struct Scsi_Host *shost = hba->host;
6438 struct scsi_device *sdev;
6439 struct request_queue *q;
6442 hba->is_sys_suspended = false;
6444 * Set RPM status of wlun device to RPM_ACTIVE,
6445 * this also clears its runtime error.
6447 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6449 /* hba device might have a runtime error otherwise */
6451 ret = pm_runtime_set_active(hba->dev);
6453 * If the wlun device had a runtime error, we also need to resume those
6454 * consumer scsi devices in case any of them failed to be
6455 * resumed due to a supplier runtime resume failure. This is to unblock
6456 * blk_queue_enter() in case there are bios waiting inside it.
6459 shost_for_each_device(sdev, shost) {
6460 q = sdev->request_queue;
6461 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6462 q->rpm_status == RPM_SUSPENDING))
6463 pm_request_resume(q->dev);
6468 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6473 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6475 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6478 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6480 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6483 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6489 static bool ufshcd_abort_one(struct request *rq, void *priv)
6493 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
6494 struct scsi_device *sdev = cmd->device;
6495 struct Scsi_Host *shost = sdev->host;
6496 struct ufs_hba *hba = shost_priv(shost);
6497 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6498 struct ufs_hw_queue *hwq;
6499 unsigned long flags;
6501 *ret = ufshcd_try_to_abort_task(hba, tag);
6502 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6503 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6504 *ret ? "failed" : "succeeded");
6506 /* Release cmd in MCQ mode if abort succeeds */
6507 if (is_mcq_enabled(hba) && (*ret == 0)) {
6508 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
6509 spin_lock_irqsave(&hwq->cq_lock, flags);
6510 if (ufshcd_cmd_inflight(lrbp->cmd))
6511 ufshcd_release_scsi_cmd(hba, lrbp);
6512 spin_unlock_irqrestore(&hwq->cq_lock, flags);
6519 * ufshcd_abort_all - Abort all pending commands.
6520 * @hba: Host bus adapter pointer.
6522 * Return: true if and only if the host controller needs to be reset.
6524 static bool ufshcd_abort_all(struct ufs_hba *hba)
6528 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
6532 /* Clear pending task management requests */
6533 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6534 ret = ufshcd_clear_tm_cmd(hba, tag);
6540 /* Complete the requests that are cleared by s/w */
6541 ufshcd_complete_requests(hba, false);
6547 * ufshcd_err_handler - handle UFS errors that require s/w attention
6548 * @work: pointer to work structure
6550 static void ufshcd_err_handler(struct work_struct *work)
6552 int retries = MAX_ERR_HANDLER_RETRIES;
6553 struct ufs_hba *hba;
6554 unsigned long flags;
6559 hba = container_of(work, struct ufs_hba, eh_work);
6562 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6563 __func__, ufshcd_state_name[hba->ufshcd_state],
6564 hba->is_powered, hba->shutting_down, hba->saved_err,
6565 hba->saved_uic_err, hba->force_reset,
6566 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6568 down(&hba->host_sem);
6569 spin_lock_irqsave(hba->host->host_lock, flags);
6570 if (ufshcd_err_handling_should_stop(hba)) {
6571 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6572 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6573 spin_unlock_irqrestore(hba->host->host_lock, flags);
6577 ufshcd_set_eh_in_progress(hba);
6578 spin_unlock_irqrestore(hba->host->host_lock, flags);
6579 ufshcd_err_handling_prepare(hba);
6580 /* Complete requests that have door-bell cleared by h/w */
6581 ufshcd_complete_requests(hba, false);
6582 spin_lock_irqsave(hba->host->host_lock, flags);
6584 needs_restore = false;
6585 needs_reset = false;
6587 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6588 hba->ufshcd_state = UFSHCD_STATE_RESET;
6589 /*
6590 * A full reset and restore might have happened after preparation
6591 * finished; double-check whether we should stop.
6592 */
6593 if (ufshcd_err_handling_should_stop(hba))
6594 goto skip_err_handling;
6596 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6599 spin_unlock_irqrestore(hba->host->host_lock, flags);
6600 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6601 ret = ufshcd_quirk_dl_nac_errors(hba);
6602 spin_lock_irqsave(hba->host->host_lock, flags);
6603 if (!ret && ufshcd_err_handling_should_stop(hba))
6604 goto skip_err_handling;
6607 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6608 (hba->saved_uic_err &&
6609 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6610 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6612 spin_unlock_irqrestore(hba->host->host_lock, flags);
6613 ufshcd_print_host_state(hba);
6614 ufshcd_print_pwr_info(hba);
6615 ufshcd_print_evt_hist(hba);
6616 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6617 ufshcd_print_trs_all(hba, pr_prdt);
6618 spin_lock_irqsave(hba->host->host_lock, flags);
6621 /*
6622 * If a host reset is required then skip forcefully clearing the
6623 * pending transfers; they will get cleared during the host reset
6624 * and restore.
6625 */
6626 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6627 ufshcd_is_saved_err_fatal(hba) ||
6628 ((hba->saved_err & UIC_ERROR) &&
6629 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6630 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6635 /*
6636 * If LINERESET was caught, UFS might have been put to PWM mode,
6637 * check if power mode restore is needed.
6638 */
6639 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6640 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6641 if (!hba->saved_uic_err)
6642 hba->saved_err &= ~UIC_ERROR;
6643 spin_unlock_irqrestore(hba->host->host_lock, flags);
6644 if (ufshcd_is_pwr_mode_restore_needed(hba))
6645 needs_restore = true;
6646 spin_lock_irqsave(hba->host->host_lock, flags);
6647 if (!hba->saved_err && !needs_restore)
6648 goto skip_err_handling;
6651 hba->silence_err_logs = true;
6652 /* release lock as clear command might sleep */
6653 spin_unlock_irqrestore(hba->host->host_lock, flags);
6655 needs_reset = ufshcd_abort_all(hba);
6657 spin_lock_irqsave(hba->host->host_lock, flags);
6658 hba->silence_err_logs = false;
6662 /*
6663 * After all reqs and tasks are cleared from the doorbell,
6664 * it is now safe to restore the power mode.
6665 */
6666 if (needs_restore) {
6667 spin_unlock_irqrestore(hba->host->host_lock, flags);
6668 /*
6669 * Hold the scaling lock just in case dev cmds
6670 * are sent via bsg and/or sysfs.
6671 */
6672 down_write(&hba->clk_scaling_lock);
6673 hba->force_pmc = true;
6674 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6675 if (pmc_err) {
6676 needs_reset = true;
6677 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6678 __func__, pmc_err);
6679 }
6680 hba->force_pmc = false;
6681 ufshcd_print_pwr_info(hba);
6682 up_write(&hba->clk_scaling_lock);
6683 spin_lock_irqsave(hba->host->host_lock, flags);
6687 /* Fatal errors need reset */
6691 hba->force_reset = false;
6692 spin_unlock_irqrestore(hba->host->host_lock, flags);
6693 err = ufshcd_reset_and_restore(hba);
6694 if (err)
6695 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6696 __func__, err);
6697 else
6698 ufshcd_recover_pm_error(hba);
6699 spin_lock_irqsave(hba->host->host_lock, flags);
6704 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6705 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6706 if (hba->saved_err || hba->saved_uic_err)
6707 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6708 __func__, hba->saved_err, hba->saved_uic_err);
6710 /* Exit in an operational state or dead */
6711 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6712 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6715 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6717 ufshcd_clear_eh_in_progress(hba);
6718 spin_unlock_irqrestore(hba->host->host_lock, flags);
6719 ufshcd_err_handling_unprepare(hba);
6722 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6723 ufshcd_state_name[hba->ufshcd_state]);
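/*
 * Illustrative sketch (mirrors ufshcd_eh_host_reset_handler() further below):
 * code that wants to trigger this error handler sets force_reset and
 * schedules eh_work under the host lock, then optionally waits for it:
 *
 *	spin_lock_irqsave(hba->host->host_lock, flags);
 *	hba->force_reset = true;
 *	ufshcd_schedule_eh_work(hba);
 *	spin_unlock_irqrestore(hba->host->host_lock, flags);
 *	flush_work(&hba->eh_work);
 */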
6727 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6728 * @hba: per-adapter instance
6731 * IRQ_HANDLED - If interrupt is valid
6732 * IRQ_NONE - If invalid interrupt
6734 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6737 irqreturn_t retval = IRQ_NONE;
6739 /* PHY layer error */
6740 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6741 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6742 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6743 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6744 /*
6745 * To know whether this error is fatal or not, DB timeout
6746 * must be checked but this error is handled separately.
6747 */
6748 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6749 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6752 /* Got a LINERESET indication. */
6753 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6754 struct uic_command *cmd = NULL;
6756 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6757 if (hba->uic_async_done && hba->active_uic_cmd)
6758 cmd = hba->active_uic_cmd;
6759 /*
6760 * Ignore the LINERESET during power mode change
6761 * operation via DME_SET command.
6762 */
6763 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6764 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6766 retval |= IRQ_HANDLED;
6769 /* PA_INIT_ERROR is fatal and needs UIC reset */
6770 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6771 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6772 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6773 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6775 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6776 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6777 else if (hba->dev_quirks &
6778 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6779 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6781 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6782 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6783 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6785 retval |= IRQ_HANDLED;
6788 /* UIC NL/TL/DME errors need a software retry */
6789 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6790 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6791 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6792 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6793 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6794 retval |= IRQ_HANDLED;
6797 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6798 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6799 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6800 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6801 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6802 retval |= IRQ_HANDLED;
6805 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6806 if ((reg & UIC_DME_ERROR) &&
6807 (reg & UIC_DME_ERROR_CODE_MASK)) {
6808 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6809 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6810 retval |= IRQ_HANDLED;
6813 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6814 __func__, hba->uic_error);
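/*
 * Summary of the UIC error registers polled above and the sticky flags they
 * set in hba->uic_error (consumed later by ufshcd_check_errors()):
 *
 *	REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER -> UFSHCD_UIC_PA_GENERIC_ERROR
 *	REG_UIC_ERROR_CODE_DATA_LINK_LAYER   -> UFSHCD_UIC_DL_* flags
 *	REG_UIC_ERROR_CODE_NETWORK_LAYER     -> UFSHCD_UIC_NL_ERROR
 *	REG_UIC_ERROR_CODE_TRANSPORT_LAYER   -> UFSHCD_UIC_TL_ERROR
 *	REG_UIC_ERROR_CODE_DME               -> UFSHCD_UIC_DME_ERROR
 */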
6819 * ufshcd_check_errors - Check for errors that need s/w attention
6820 * @hba: per-adapter instance
6821 * @intr_status: interrupt status generated by the controller
6824 * IRQ_HANDLED - If interrupt is valid
6825 * IRQ_NONE - If invalid interrupt
6827 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6829 bool queue_eh_work = false;
6830 irqreturn_t retval = IRQ_NONE;
6832 spin_lock(hba->host->host_lock);
6833 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6835 if (hba->errors & INT_FATAL_ERRORS) {
6836 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6838 queue_eh_work = true;
6841 if (hba->errors & UIC_ERROR) {
6843 retval = ufshcd_update_uic_error(hba);
6845 queue_eh_work = true;
6848 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6850 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6851 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6853 hba->errors, ufshcd_get_upmcrs(hba));
6854 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6856 ufshcd_set_link_broken(hba);
6857 queue_eh_work = true;
6860 if (queue_eh_work) {
6861 /*
6862 * Update the transfer error masks to sticky bits; do this
6863 * irrespective of the current ufshcd_state.
6864 */
6865 hba->saved_err |= hba->errors;
6866 hba->saved_uic_err |= hba->uic_error;
6868 /* dump controller state before resetting */
6869 if ((hba->saved_err &
6870 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6871 (hba->saved_uic_err &&
6872 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6873 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6874 __func__, hba->saved_err,
6875 hba->saved_uic_err);
6876 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6878 ufshcd_print_pwr_info(hba);
6880 ufshcd_schedule_eh_work(hba);
6881 retval |= IRQ_HANDLED;
6883 /*
6884 * if (!queue_eh_work) -
6885 * Other errors are either non-fatal, where the host recovers
6886 * by itself without s/w intervention, or errors that will be
6887 * handled by the SCSI core layer.
6888 */
6891 spin_unlock(hba->host->host_lock);
6896 * ufshcd_tmc_handler - handle task management function completion
6897 * @hba: per adapter instance
6900 * IRQ_HANDLED - If interrupt is valid
6901 * IRQ_NONE - If invalid interrupt
6903 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6905 unsigned long flags, pending, issued;
6906 irqreturn_t ret = IRQ_NONE;
6909 spin_lock_irqsave(hba->host->host_lock, flags);
6910 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6911 issued = hba->outstanding_tasks & ~pending;
6912 for_each_set_bit(tag, &issued, hba->nutmrs) {
6913 struct request *req = hba->tmf_rqs[tag];
6914 struct completion *c = req->end_io_data;
6919 spin_unlock_irqrestore(hba->host->host_lock, flags);
6925 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6926 * @hba: per adapter instance
6928 * Return: IRQ_HANDLED if interrupt is handled.
6930 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6932 struct ufs_hw_queue *hwq;
6933 unsigned long outstanding_cqs;
6934 unsigned int nr_queues;
6938 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6940 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6942 /* Exclude the poll queues */
6943 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6944 for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6947 events = ufshcd_mcq_read_cqis(hba, i);
6949 ufshcd_mcq_write_cqis(hba, events, i);
6951 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6952 ufshcd_mcq_poll_cqe_lock(hba, hwq);
6959 * ufshcd_sl_intr - Interrupt service routine
6960 * @hba: per adapter instance
6961 * @intr_status: contains interrupts generated by the controller
6964 * IRQ_HANDLED - If interrupt is valid
6965 * IRQ_NONE - If invalid interrupt
6967 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6969 irqreturn_t retval = IRQ_NONE;
6971 if (intr_status & UFSHCD_UIC_MASK)
6972 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6974 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6975 retval |= ufshcd_check_errors(hba, intr_status);
6977 if (intr_status & UTP_TASK_REQ_COMPL)
6978 retval |= ufshcd_tmc_handler(hba);
6980 if (intr_status & UTP_TRANSFER_REQ_COMPL)
6981 retval |= ufshcd_transfer_req_compl(hba);
6983 if (intr_status & MCQ_CQ_EVENT_STATUS)
6984 retval |= ufshcd_handle_mcq_cq_events(hba);
6990 * ufshcd_intr - Main interrupt service routine
6992 * @__hba: pointer to adapter instance
6995 * IRQ_HANDLED - If interrupt is valid
6996 * IRQ_NONE - If invalid interrupt
6998 static irqreturn_t ufshcd_intr(int irq, void *__hba)
7000 u32 intr_status, enabled_intr_status = 0;
7001 irqreturn_t retval = IRQ_NONE;
7002 struct ufs_hba *hba = __hba;
7003 int retries = hba->nutrs;
7005 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7006 hba->ufs_stats.last_intr_status = intr_status;
7007 hba->ufs_stats.last_intr_ts = local_clock();
7009 /*
7010 * There can be at most hba->nutrs reqs in flight and, in the worst case,
7011 * the reqs may finish one by one after the interrupt status is
7012 * read. Make sure we handle them by checking the interrupt status
7013 * again in a loop until we process all of the reqs before returning.
7014 */
7015 while (intr_status && retries--) {
7016 enabled_intr_status =
7017 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
7018 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
7019 if (enabled_intr_status)
7020 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7022 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7025 if (enabled_intr_status && retval == IRQ_NONE &&
7026 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
7027 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
7028 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
7029 __func__,
7030 intr_status,
7031 hba->ufs_stats.last_intr_status,
7032 enabled_intr_status);
7033 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
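/*
 * Registration sketch: this handler is wired up during controller init
 * (ufshcd_init(), outside this excerpt), roughly as:
 *
 *	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 */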
7039 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
7042 u32 mask = 1 << tag;
7043 unsigned long flags;
7045 if (!test_bit(tag, &hba->outstanding_tasks))
7048 spin_lock_irqsave(hba->host->host_lock, flags);
7049 ufshcd_utmrl_clear(hba, tag);
7050 spin_unlock_irqrestore(hba->host->host_lock, flags);
7052 /* poll for max. 1 sec to clear door bell register by h/w */
7053 err = ufshcd_wait_for_register(hba,
7054 REG_UTP_TASK_REQ_DOOR_BELL,
7055 mask, 0, 1000, 1000);
7057 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
7058 tag, err < 0 ? "failed" : "succeeded");
7064 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
7065 struct utp_task_req_desc *treq, u8 tm_function)
7067 struct request_queue *q = hba->tmf_queue;
7068 struct Scsi_Host *host = hba->host;
7069 DECLARE_COMPLETION_ONSTACK(wait);
7070 struct request *req;
7071 unsigned long flags;
7074 /*
7075 * blk_mq_alloc_request() is used here only to get a free tag.
7076 */
7077 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
7079 return PTR_ERR(req);
7081 req->end_io_data = &wait;
7084 spin_lock_irqsave(host->host_lock, flags);
7086 task_tag = req->tag;
7087 hba->tmf_rqs[req->tag] = req;
7088 treq->upiu_req.req_header.task_tag = task_tag;
7090 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
7091 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
7093 /* send command to the controller */
7094 __set_bit(task_tag, &hba->outstanding_tasks);
7096 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
7097 /* Make sure that doorbell is committed immediately */
7100 spin_unlock_irqrestore(host->host_lock, flags);
7102 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
7104 /* wait until the task management command is completed */
7105 err = wait_for_completion_io_timeout(&wait,
7106 msecs_to_jiffies(TM_CMD_TIMEOUT));
7108 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
7109 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
7110 __func__, tm_function);
7111 if (ufshcd_clear_tm_cmd(hba, task_tag))
7112 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7113 __func__, task_tag);
7117 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
7119 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7122 spin_lock_irqsave(hba->host->host_lock, flags);
7123 hba->tmf_rqs[req->tag] = NULL;
7124 __clear_bit(task_tag, &hba->outstanding_tasks);
7125 spin_unlock_irqrestore(hba->host->host_lock, flags);
7127 ufshcd_release(hba);
7128 blk_mq_free_request(req);
7134 * ufshcd_issue_tm_cmd - issues task management commands to controller
7135 * @hba: per adapter instance
7136 * @lun_id: LUN ID to which TM command is sent
7137 * @task_id: task ID to which the TM command is applicable
7138 * @tm_function: task management function opcode
7139 * @tm_response: task management service response return value
7141 * Return: non-zero value on error, zero on success.
7143 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
7144 u8 tm_function, u8 *tm_response)
7146 struct utp_task_req_desc treq = { };
7147 enum utp_ocs ocs_value;
7150 /* Configure task request descriptor */
7151 treq.header.interrupt = 1;
7152 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7154 /* Configure task request UPIU */
7155 treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
7156 treq.upiu_req.req_header.lun = lun_id;
7157 treq.upiu_req.req_header.tm_function = tm_function;
7159 /*
7160 * The host shall provide the same value for LUN field in the basic
7161 * header and for Input Parameter.
7162 */
7163 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
7164 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
7166 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
7167 if (err == -ETIMEDOUT)
7170 ocs_value = treq.header.ocs & MASK_OCS;
7171 if (ocs_value != OCS_SUCCESS)
7172 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
7173 __func__, ocs_value);
7174 else if (tm_response)
7175 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
7176 MASK_TM_SERVICE_RESP;
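/*
 * Usage sketch (mirrors ufshcd_eh_device_reset_handler() below): both the
 * return value and the TM service response must be checked, e.g. for an LU
 * reset:
 *
 *	u8 resp = 0xF;
 *	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 *		if (!err)
 *			err = resp;
 *	}
 */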
7181 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7182 * @hba: per-adapter instance
7183 * @req_upiu: upiu request
7184 * @rsp_upiu: upiu reply
7185 * @desc_buff: pointer to descriptor buffer, NULL if NA
7186 * @buff_len: descriptor size, 0 if NA
7187 * @cmd_type: specifies the type (NOP, Query...)
7188 * @desc_op: descriptor operation
7190 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
7191 * Therefore, they "ride" the device management infrastructure: they use its
7192 * tag and task work queues.
7193 *
7194 * Since there is only one available tag for device management commands,
7195 * the caller is expected to hold the hba->dev_cmd.lock mutex.
7196 *
7197 * Return: 0 upon success; < 0 upon failure.
7198 */
7199 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7200 struct utp_upiu_req *req_upiu,
7201 struct utp_upiu_req *rsp_upiu,
7202 u8 *desc_buff, int *buff_len,
7203 enum dev_cmd_type cmd_type,
7204 enum query_opcode desc_op)
7206 DECLARE_COMPLETION_ONSTACK(wait);
7207 const u32 tag = hba->reserved_slot;
7208 struct ufshcd_lrb *lrbp;
7212 /* Protects use of hba->reserved_slot. */
7213 lockdep_assert_held(&hba->dev_cmd.lock);
7215 down_read(&hba->clk_scaling_lock);
7217 lrbp = &hba->lrb[tag];
7219 lrbp->task_tag = tag;
7221 lrbp->intr_cmd = true;
7222 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7223 hba->dev_cmd.type = cmd_type;
7225 if (hba->ufs_version <= ufshci_version(1, 1))
7226 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
7228 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7230 /* update the task tag in the request upiu */
7231 req_upiu->header.task_tag = tag;
7233 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7235 /* just copy the upiu request as it is */
7236 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7237 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7238 /* The Data Segment Area is optional depending upon the query
7239 * function value. For WRITE DESCRIPTOR, the data segment
7240 * follows right after the TSF.
7241 */
7242 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7246 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7248 hba->dev_cmd.complete = &wait;
7250 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
7252 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7253 /*
7254 * Ignore the return value here - ufshcd_check_query_response() is
7255 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7256 * Read the response directly, ignoring all errors.
7257 */
7258 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7260 /* just copy the upiu response as it is */
7261 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7262 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7263 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7264 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
7265 .data_segment_length);
7267 if (*buff_len >= resp_len) {
7268 memcpy(desc_buff, descp, resp_len);
7269 *buff_len = resp_len;
7270 } else {
7271 dev_warn(hba->dev,
7272 "%s: rsp size %d is bigger than buffer size %d",
7273 __func__, resp_len, *buff_len);
7274 *buff_len = 0;
7275 err = -EINVAL;
7276 }
7277 }
7278 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7279 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
7281 up_read(&hba->clk_scaling_lock);
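/*
 * Locking sketch for callers (mirrors ufshcd_exec_raw_upiu_cmd() below),
 * assuming ufshcd_hold()/ufshcd_release() bracket the device command:
 *
 *	ufshcd_hold(hba);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
 *					   NULL, NULL, DEV_CMD_TYPE_NOP,
 *					   UPIU_QUERY_OPCODE_NOP);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */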
7286 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7287 * @hba: per-adapter instance
7288 * @req_upiu: upiu request
7289 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
7290 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
7291 * @desc_buff: pointer to descriptor buffer, NULL if NA
7292 * @buff_len: descriptor size, 0 if NA
7293 * @desc_op: descriptor operation
7295 * Supports UTP Transfer requests (nop and query), and UTP Task
7296 * Management requests.
7297 * It is up to the caller to fill the UPIU content properly, as it will
7298 * be copied without any further input validation.
7300 * Return: 0 upon success; < 0 upon failure.
7302 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7303 struct utp_upiu_req *req_upiu,
7304 struct utp_upiu_req *rsp_upiu,
7305 enum upiu_request_transaction msgcode,
7306 u8 *desc_buff, int *buff_len,
7307 enum query_opcode desc_op)
7310 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7311 struct utp_task_req_desc treq = { };
7312 enum utp_ocs ocs_value;
7313 u8 tm_f = req_upiu->header.tm_function;
7316 case UPIU_TRANSACTION_NOP_OUT:
7317 cmd_type = DEV_CMD_TYPE_NOP;
7319 case UPIU_TRANSACTION_QUERY_REQ:
7321 mutex_lock(&hba->dev_cmd.lock);
7322 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7323 desc_buff, buff_len,
7325 mutex_unlock(&hba->dev_cmd.lock);
7326 ufshcd_release(hba);
7329 case UPIU_TRANSACTION_TASK_REQ:
7330 treq.header.interrupt = 1;
7331 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7333 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7335 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7336 if (err == -ETIMEDOUT)
7339 ocs_value = treq.header.ocs & MASK_OCS;
7340 if (ocs_value != OCS_SUCCESS) {
7341 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7346 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7359 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7360 * @hba: per adapter instance
7361 * @req_upiu: upiu request
7362 * @rsp_upiu: upiu reply
7363 * @req_ehs: EHS field which contains Advanced RPMB Request Message
7364 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
7365 * @sg_cnt: The number of sg lists actually used
7366 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7367 * @dir: DMA direction
7369 * Return: zero on success, non-zero on failure.
7371 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7372 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7373 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7374 enum dma_data_direction dir)
7376 DECLARE_COMPLETION_ONSTACK(wait);
7377 const u32 tag = hba->reserved_slot;
7378 struct ufshcd_lrb *lrbp;
7385 /* Protects use of hba->reserved_slot. */
7387 mutex_lock(&hba->dev_cmd.lock);
7388 down_read(&hba->clk_scaling_lock);
7390 lrbp = &hba->lrb[tag];
7392 lrbp->task_tag = tag;
7393 lrbp->lun = UFS_UPIU_RPMB_WLUN;
7395 lrbp->intr_cmd = true;
7396 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7397 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7399 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7400 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7402 /*
7403 * According to UFSHCI 4.0 specification page 24: if EHSLUTRDS is 0, the host
7404 * controller takes the EHS length from the CMD UPIU, and the SW driver uses the
7405 * EHS Length field in the CMD UPIU. If it is 1, the HW takes it from the UTRD.
7406 */
7407 if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
7408 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7410 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
7412 /* update the task tag */
7413 req_upiu->header.task_tag = tag;
7415 /* copy the UPIU(contains CDB) request as it is */
7416 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7417 /* Copy EHS, starting with byte32, immediately after the CDB package */
7418 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7420 if (dir != DMA_NONE && sg_list)
7421 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7423 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7425 hba->dev_cmd.complete = &wait;
7427 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7429 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7432 /* Just copy the upiu response as it is */
7433 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7434 /* Get the response UPIU result */
7435 result = (lrbp->ucd_rsp_ptr->header.response << 8) |
7436 lrbp->ucd_rsp_ptr->header.status;
7438 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
7439 /*
7440 * Since the bLength in the EHS indicates the total size of the EHS Header and
7441 * EHS Data in 32-byte units, the bLength of the Request/Response for Advanced
7442 * RPMB is 2.
7443 */
7444 if (ehs_len == 2 && rsp_ehs) {
7445 /*
7446 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7447 * (ALIGNED_UPIU_SIZE = 512), and the EHS data starts at byte 32.
7448 */
7449 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7450 memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7454 up_read(&hba->clk_scaling_lock);
7455 mutex_unlock(&hba->dev_cmd.lock);
7456 ufshcd_release(hba);
7457 return err ? : result;
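/*
 * Response buffer layout handled above (illustrative, ALIGNED_UPIU_SIZE = 512):
 *
 *	byte  0..31 : response UPIU header       -> copied to rsp_upiu
 *	byte 32..95 : EHS, ehs_len (2) * 32 bytes -> copied to rsp_ehs
 */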
7461 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7462 * @cmd: SCSI command pointer
7464 * Return: SUCCESS or FAILED.
7466 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7468 unsigned long flags, pending_reqs = 0, not_cleared = 0;
7469 struct Scsi_Host *host;
7470 struct ufs_hba *hba;
7471 struct ufs_hw_queue *hwq;
7472 struct ufshcd_lrb *lrbp;
7473 u32 pos, not_cleared_mask = 0;
7477 host = cmd->device->host;
7478 hba = shost_priv(host);
7480 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7481 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7482 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7488 if (is_mcq_enabled(hba)) {
7489 for (pos = 0; pos < hba->nutrs; pos++) {
7490 lrbp = &hba->lrb[pos];
7491 if (ufshcd_cmd_inflight(lrbp->cmd) &&
7493 ufshcd_clear_cmd(hba, pos);
7494 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7495 ufshcd_mcq_poll_cqe_lock(hba, hwq);
7502 /* clear the commands that were pending for corresponding LUN */
7503 spin_lock_irqsave(&hba->outstanding_lock, flags);
7504 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7505 if (hba->lrb[pos].lun == lun)
7506 __set_bit(pos, &pending_reqs);
7507 hba->outstanding_reqs &= ~pending_reqs;
7508 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7510 for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
7511 if (ufshcd_clear_cmd(hba, pos) < 0) {
7512 spin_lock_irqsave(&hba->outstanding_lock, flags);
7513 not_cleared = 1U << pos &
7514 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7515 hba->outstanding_reqs |= not_cleared;
7516 not_cleared_mask |= not_cleared;
7517 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7519 dev_err(hba->dev, "%s: failed to clear request %d\n",
7523 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
7526 hba->req_abort_count = 0;
7527 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7531 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7537 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7539 struct ufshcd_lrb *lrbp;
7542 for_each_set_bit(tag, &bitmap, hba->nutrs) {
7543 lrbp = &hba->lrb[tag];
7544 lrbp->req_abort_skip = true;
7549 * ufshcd_try_to_abort_task - abort a specific task
7550 * @hba: Pointer to adapter instance
7551 * @tag: Task tag/index to be aborted
7553 * Abort the pending command in device by sending UFS_ABORT_TASK task management
7554 * command, and in host controller by clearing the door-bell register. There can
7555 * be race between controller sending the command to the device while abort is
7556 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
7557 * really issued and then try to abort it.
7559 * Return: zero on success, non-zero on failure.
7561 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7563 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7569 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7570 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7571 UFS_QUERY_TASK, &resp);
7572 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7573 /* cmd pending in the device */
7574 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7577 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7578 /*
7579 * cmd not pending in the device, check if it is
7580 * in transition.
7581 */
7582 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7584 if (is_mcq_enabled(hba)) {
7586 if (ufshcd_cmd_inflight(lrbp->cmd)) {
7587 /* sleep for max. 200us same delay as in SDB mode */
7588 usleep_range(100, 200);
7591 /* command completed already */
7592 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
7597 /* Single Doorbell Mode */
7598 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7599 if (reg & (1 << tag)) {
7600 /* sleep for max. 200us to stabilize */
7601 usleep_range(100, 200);
7604 /* command completed already */
7605 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7610 "%s: no response from device. tag = %d, err %d\n",
7611 __func__, tag, err);
7613 err = resp; /* service response error */
7623 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7624 UFS_ABORT_TASK, &resp);
7625 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7627 err = resp; /* service response error */
7628 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7629 __func__, tag, err);
7634 err = ufshcd_clear_cmd(hba, tag);
7636 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7637 __func__, tag, err);
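/*
 * The abort protocol implemented above, summarized:
 *
 *	1. UFS_QUERY_TASK    - poll whether the command is still pending in
 *	   the device (retried while the inflight/doorbell state settles);
 *	2. UFS_ABORT_TASK    - abort the command in the device;
 *	3. ufshcd_clear_cmd() - clear the command in the host controller.
 */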
7644 * ufshcd_abort - scsi host template eh_abort_handler callback
7645 * @cmd: SCSI command pointer
7647 * Return: SUCCESS or FAILED.
7649 static int ufshcd_abort(struct scsi_cmnd *cmd)
7651 struct Scsi_Host *host = cmd->device->host;
7652 struct ufs_hba *hba = shost_priv(host);
7653 int tag = scsi_cmd_to_rq(cmd)->tag;
7654 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7655 unsigned long flags;
7662 if (!is_mcq_enabled(hba)) {
7663 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7664 if (!test_bit(tag, &hba->outstanding_reqs)) {
7665 /* If command is already aborted/completed, return FAILED. */
7667 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7668 __func__, tag, hba->outstanding_reqs, reg);
7673 /* Print Transfer Request of aborted task */
7674 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7676 /*
7677 * Print detailed info about the aborted request.
7678 * As more than one request might get aborted at the same time,
7679 * print full information only for the first aborted request in order
7680 * to reduce repeated printouts. For other aborted requests only print
7681 * basic details.
7682 */
7683 scsi_print_command(cmd);
7684 if (!hba->req_abort_count) {
7685 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7686 ufshcd_print_evt_hist(hba);
7687 ufshcd_print_host_state(hba);
7688 ufshcd_print_pwr_info(hba);
7689 ufshcd_print_tr(hba, tag, true);
7691 ufshcd_print_tr(hba, tag, false);
7693 hba->req_abort_count++;
7695 if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
7696 /* only execute this code in single doorbell mode */
7698 "%s: cmd was completed, but without a notifying intr, tag = %d",
7700 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7704 /*
7705 * Aborting a task addressed to the device W-LUN is illegal. Such a
7706 * command will fail due to the spec violation, and the next step of SCSI
7707 * error handling would be to send an LU reset which, again, is a spec
7708 * violation. To avoid these unnecessary/illegal steps, first we clean up
7709 * the lrb taken by this cmd and re-set it in outstanding_reqs,
7710 * then queue the eh_work and bail.
7711 */
7712 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7713 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7715 spin_lock_irqsave(host->host_lock, flags);
7716 hba->force_reset = true;
7717 ufshcd_schedule_eh_work(hba);
7718 spin_unlock_irqrestore(host->host_lock, flags);
7722 if (is_mcq_enabled(hba)) {
7723 /* MCQ mode. Branch off to handle abort for mcq mode */
7724 err = ufshcd_mcq_abort(cmd);
7728 /* Skip task abort in case previous aborts failed and report failure */
7729 if (lrbp->req_abort_skip) {
7730 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7731 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7735 err = ufshcd_try_to_abort_task(hba, tag);
7737 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7738 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7743 /*
7744 * Clear the corresponding bit from outstanding_reqs since the command
7745 * has been aborted successfully.
7746 */
7747 spin_lock_irqsave(&hba->outstanding_lock, flags);
7748 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7749 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7752 ufshcd_release_scsi_cmd(hba, lrbp);
7757 /* Matches the ufshcd_hold() call at the start of this function. */
7758 ufshcd_release(hba);
7763 * ufshcd_host_reset_and_restore - reset and restore host controller
7764 * @hba: per-adapter instance
7766 * Note that host controller reset may issue DME_RESET to
7767 * local and remote (device) Uni-Pro stack and the attributes
7768 * are reset to default state.
7770 * Return: zero on success, non-zero on failure.
7772 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7776 /*
7777 * Stop the host controller and complete the requests
7778 * cleared by h/w.
7779 */
7780 ufshcd_hba_stop(hba);
7781 hba->silence_err_logs = true;
7782 ufshcd_complete_requests(hba, true);
7783 hba->silence_err_logs = false;
7785 /* scale up clocks to max frequency before full reinitialization */
7786 ufshcd_scale_clks(hba, ULONG_MAX, true);
7788 err = ufshcd_hba_enable(hba);
7790 /* Establish the link again and restore the device */
7792 err = ufshcd_probe_hba(hba, false);
7794 if (err)
7795 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7796 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7801 * ufshcd_reset_and_restore - reset and re-initialize host/device
7802 * @hba: per-adapter instance
7804 * Reset and recover device, host and re-establish link. This
7805 * is helpful to recover the communication in fatal error conditions.
7807 * Return: zero on success, non-zero on failure.
7809 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7812 u32 saved_uic_err = 0;
7814 unsigned long flags;
7815 int retries = MAX_HOST_RESET_RETRIES;
7817 spin_lock_irqsave(hba->host->host_lock, flags);
7819 /*
7820 * This is a fresh start, cache and clear the saved error first,
7821 * in case a new error is generated during reset and restore.
7822 */
7823 saved_err |= hba->saved_err;
7824 saved_uic_err |= hba->saved_uic_err;
7826 hba->saved_uic_err = 0;
7827 hba->force_reset = false;
7828 hba->ufshcd_state = UFSHCD_STATE_RESET;
7829 spin_unlock_irqrestore(hba->host->host_lock, flags);
7831 /* Reset the attached device */
7832 ufshcd_device_reset(hba);
7834 err = ufshcd_host_reset_and_restore(hba);
7836 spin_lock_irqsave(hba->host->host_lock, flags);
7839 /* Do not exit unless operational or dead */
7840 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7841 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7842 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7844 } while (err && --retries);
7846 /*
7847 * Inform the SCSI mid-layer that we did a reset, so that it can
7848 * handle Unit Attention properly.
7849 */
7850 scsi_report_bus_reset(hba->host, 0);
7852 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7853 hba->saved_err |= saved_err;
7854 hba->saved_uic_err |= saved_uic_err;
7856 spin_unlock_irqrestore(hba->host->host_lock, flags);
7862 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7863 * @cmd: SCSI command pointer
7865 * Return: SUCCESS or FAILED.
7867 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7870 unsigned long flags;
7871 struct ufs_hba *hba;
7873 hba = shost_priv(cmd->device->host);
7875 /*
7876 * If runtime PM sent an SSU and got a timeout, scsi_error_handler is
7877 * stuck in this function waiting for flush_work(&hba->eh_work), while
7878 * ufshcd_err_handler() (eh_work) is stuck waiting for runtime PM. Do
7879 * ufshcd_link_recovery() instead of eh_work to prevent the deadlock.
7880 */
7881 if (hba->pm_op_in_progress) {
7882 if (ufshcd_link_recovery(hba))
7888 spin_lock_irqsave(hba->host->host_lock, flags);
7889 hba->force_reset = true;
7890 ufshcd_schedule_eh_work(hba);
7891 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7892 spin_unlock_irqrestore(hba->host->host_lock, flags);
7894 flush_work(&hba->eh_work);
7896 spin_lock_irqsave(hba->host->host_lock, flags);
7897 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7899 spin_unlock_irqrestore(hba->host->host_lock, flags);
7905 * ufshcd_get_max_icc_level - calculate the ICC level
7906 * @sup_curr_uA: max. current supported by the regulator
7907 * @start_scan: row at the desc table to start scan from
7908 * @buff: power descriptor buffer
7910 * Return: calculated max ICC level for specific regulator.
7912 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7920 for (i = start_scan; i >= 0; i--) {
7921 data = get_unaligned_be16(&buff[2 * i]);
7922 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7923 ATTR_ICC_LVL_UNIT_OFFSET;
7924 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7926 case UFSHCD_NANO_AMP:
7927 curr_uA = curr_uA / 1000;
7929 case UFSHCD_MILI_AMP:
7930 curr_uA = curr_uA * 1000;
7933 curr_uA = curr_uA * 1000 * 1000;
7935 case UFSHCD_MICRO_AMP:
7939 if (sup_curr_uA >= curr_uA)
7944 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
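/*
 * Worked example with hypothetical descriptor values: for sup_curr_uA =
 * 500000 (0.5 A) and a row whose unit is UFSHCD_MILI_AMP with value 300,
 * curr_uA = 300 * 1000 = 300000, so 500000 >= 300000 and that row's index
 * is returned as the ICC level.
 */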
7950 /**
7951 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7952 * @hba: per-adapter instance
7953 * @desc_buf: power descriptor buffer to extract ICC levels from.
7954 *
7955 * In case the regulators are not initialized, 0 is returned.
7956 * Return: calculated ICC level.
7957 */
7958 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7963 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7964 !hba->vreg_info.vccq2) {
7965 /*
7966 * Use dev_dbg to avoid messages during runtime PM: messages written
7967 * back to storage by user space cause runtime resume, which causes
7968 * more messages, and so on.
7969 */
7971 dev_dbg(hba->dev,
7972 "%s: Regulator capability was not set, actvIccLevel=%d",
7973 __func__, icc_level);
7977 if (hba->vreg_info.vcc->max_uA)
7978 icc_level = ufshcd_get_max_icc_level(
7979 hba->vreg_info.vcc->max_uA,
7980 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7981 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7983 if (hba->vreg_info.vccq->max_uA)
7984 icc_level = ufshcd_get_max_icc_level(
7985 hba->vreg_info.vccq->max_uA,
7987 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7989 if (hba->vreg_info.vccq2->max_uA)
7990 icc_level = ufshcd_get_max_icc_level(
7991 hba->vreg_info.vccq2->max_uA,
7993 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7998 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
8004 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8008 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
8009 desc_buf, QUERY_DESC_MAX_SIZE);
8012 "%s: Failed reading power descriptor ret = %d",
8017 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
8018 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
8020 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8021 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
8025 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
8026 __func__, icc_level, ret);
8032 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
8034 struct Scsi_Host *shost = sdev->host;
8036 scsi_autopm_get_device(sdev);
8037 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
8038 if (sdev->rpm_autosuspend)
8039 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
8040 shost->rpm_autosuspend_delay);
8041 scsi_autopm_put_device(sdev);
8045 * ufshcd_scsi_add_wlus - Adds required W-LUs
8046 * @hba: per-adapter instance
8048 * The UFS device specification requires UFS devices to support 4 well known
8049 * logical units:
8050 *	"REPORT_LUNS" (address: 01h)
8051 *	"UFS Device" (address: 50h)
8052 *	"RPMB" (address: 44h)
8053 *	"BOOT" (address: 30h)
8054 * A UFS device's power management needs to be controlled by the "POWER
8055 * CONDITION" field of the SSU (START STOP UNIT) command. This "power
8056 * condition" field takes effect only when it is sent to the "UFS device"
8057 * well known logical unit, hence we require a scsi_device instance to
8058 * represent this logical unit so that the UFS host driver can send the
8059 * SSU command for power management.
8060 * We also require the scsi_device instance for the "RPMB" (Replay Protected
8061 * Memory Block) LU so that user space processes can control this LU. User
8062 * space may also want to have access to the BOOT LU.
8063 *
8064 * This function adds scsi device instances for each well known LU
8065 * (except the "REPORT LUNS" LU).
8066 *
8067 * Return: zero on success (all required W-LUs are added successfully),
8068 * non-zero error value on failure (if it failed to add any required W-LU).
8069 */
8070 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
8073 struct scsi_device *sdev_boot, *sdev_rpmb;
8075 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
8076 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
8077 if (IS_ERR(hba->ufs_device_wlun)) {
8078 ret = PTR_ERR(hba->ufs_device_wlun);
8079 hba->ufs_device_wlun = NULL;
8082 scsi_device_put(hba->ufs_device_wlun);
8084 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
8085 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
8086 if (IS_ERR(sdev_rpmb)) {
8087 ret = PTR_ERR(sdev_rpmb);
8088 goto remove_ufs_device_wlun;
8090 ufshcd_blk_pm_runtime_init(sdev_rpmb);
8091 scsi_device_put(sdev_rpmb);
8093 sdev_boot = __scsi_add_device(hba->host, 0, 0,
8094 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
8095 if (IS_ERR(sdev_boot)) {
8096 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
8098 ufshcd_blk_pm_runtime_init(sdev_boot);
8099 scsi_device_put(sdev_boot);
8103 remove_ufs_device_wlun:
8104 scsi_remove_device(hba->ufs_device_wlun);
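/*
 * Addressing sketch: ufshcd_upiu_wlun_to_scsi_wlun() maps a UFS W-LUN id to
 * the SCSI well known LUN range, so e.g. the "UFS Device" W-LU (address 50h)
 * shows up to the SCSI layer as a well known LUN rather than a regular LU.
 */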
8109 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
8111 struct ufs_dev_info *dev_info = &hba->dev_info;
8113 u32 d_lu_wb_buf_alloc;
8114 u32 ext_ufs_feature;
8116 if (!ufshcd_is_wb_allowed(hba))
8119 /*
8120 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices, or for
8121 * UFS devices with the quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8122 * enabled.
8123 */
8124 if (!(dev_info->wspecversion >= 0x310 ||
8125 dev_info->wspecversion == 0x220 ||
8126 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
8129 ext_ufs_feature = get_unaligned_be32(desc_buf +
8130 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8132 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
8135 /*
8136 * WB may be supported but not configured while provisioning. The spec
8137 * says that in dedicated WB buffer mode at most one LUN would have a
8138 * WB buffer configured.
8139 */
8140 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
8142 dev_info->b_presrv_uspc_en =
8143 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
8145 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
8146 if (!get_unaligned_be32(desc_buf +
8147 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
8150 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
8151 d_lu_wb_buf_alloc = 0;
8152 ufshcd_read_unit_desc_param(hba,
8154 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
8155 (u8 *)&d_lu_wb_buf_alloc,
8156 sizeof(d_lu_wb_buf_alloc));
8157 if (d_lu_wb_buf_alloc) {
8158 dev_info->wb_dedicated_lu = lun;
8163 if (!d_lu_wb_buf_alloc)
8167 if (!ufshcd_is_wb_buf_lifetime_available(hba))
8173 hba->caps &= ~UFSHCD_CAP_WB_EN;
8176 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
8178 struct ufs_dev_info *dev_info = &hba->dev_info;
8179 u32 ext_ufs_feature;
8182 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
8185 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8187 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
8188 mask |= MASK_EE_TOO_LOW_TEMP;
8190 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
8191 mask |= MASK_EE_TOO_HIGH_TEMP;
8194 ufshcd_enable_ee(hba, mask);
8195 ufs_hwmon_probe(hba, mask);
8199 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
8201 struct ufs_dev_info *dev_info = &hba->dev_info;
8202 u32 ext_ufs_feature;
8206 /* Only UFS-4.0 and above may support EXT_IID */
8207 if (dev_info->wspecversion < 0x400)
8210 ext_ufs_feature = get_unaligned_be32(desc_buf +
8211 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8212 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
8215 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8216 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
8217 if (err)
8218 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
8221 dev_info->b_ext_iid_en = ext_iid_en;
8224 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
8225 const struct ufs_dev_quirk *fixups)
8227 const struct ufs_dev_quirk *f;
8228 struct ufs_dev_info *dev_info = &hba->dev_info;
8233 for (f = fixups; f->quirk; f++) {
8234 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
8235 f->wmanufacturerid == UFS_ANY_VENDOR) &&
8236 ((dev_info->model &&
8237 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
8238 !strcmp(f->model, UFS_ANY_MODEL)))
8239 hba->dev_quirks |= f->quirk;
8242 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
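/*
 * Illustrative fixup table (hypothetical entry) that a vendor driver could
 * pass to ufshcd_fixup_dev_quirks():
 *
 *	static const struct ufs_dev_quirk vendor_fixups[] = {
 *		{ .wmanufacturerid = UFS_ANY_VENDOR,
 *		  .model = UFS_ANY_MODEL,
 *		  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
 *		{}
 *	};
 */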
8244 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8246 /* fix by general quirk table */
8247 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8249 /* allow vendors to fix quirks */
8250 ufshcd_vops_fixup_dev_quirks(hba);
8253 static void ufshcd_update_rtc(struct ufs_hba *hba)
8255 struct timespec64 ts64;
8259 ktime_get_real_ts64(&ts64);
8261 if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
8262 dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
8266 /*
8267 * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
8268 * 2146 is required, it is recommended to choose the relative RTC mode.
8269 */
8270 val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
8272 ufshcd_rpm_get_sync(hba);
8273 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
8275 ufshcd_rpm_put_sync(hba);
8277 if (err)
8278 dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
8279 else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
8280 hba->dev_info.rtc_time_baseline = ts64.tv_sec;
8283 static void ufshcd_rtc_work(struct work_struct *work)
8285 struct ufs_hba *hba;
8287 hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
8289 /* Update RTC only when there are no requests in progress and UFSHCI is operational */
8290 if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
8291 ufshcd_update_rtc(hba);
8293 if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
8294 schedule_delayed_work(&hba->ufs_rtc_update_work,
8295 msecs_to_jiffies(hba->dev_info.rtc_update_period));
8298 static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
8300 u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
8301 struct ufs_dev_info *dev_info = &hba->dev_info;
8303 if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
8304 dev_info->rtc_type = UFS_RTC_ABSOLUTE;
8306 /*
8307 * Linux measures time as the number of seconds elapsed since
8308 * 00:00:00 UTC on January 1st 1970, while the UFS absolute RTC counts
8309 * from January 1st 2010 00:00, so the ABS baseline must be adjusted.
8310 */
8311 dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
8312 mktime64(1970, 1, 1, 0, 0, 0);
8314 dev_info->rtc_type = UFS_RTC_RELATIVE;
8315 dev_info->rtc_time_baseline = 0;
8318 /*
8319 * We ignore the TIME_PERIOD defined in wPeriodicRTCUpdate because the spec does not
8320 * clearly state how to calculate the update period for each time unit. Instead, disable
8321 * the periodic RTC update work and let the user configure it via sysfs as needed.
8322 */
8323 dev_info->rtc_update_period = 0;
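/*
 * Baseline arithmetic example (UFS_RTC_ABSOLUTE, illustrative values):
 * rtc_time_baseline = mktime64(2010,1,1,0,0,0) - mktime64(1970,1,1,0,0,0)
 * = 1262304000 s, so a current time of 2024-01-01 00:00:00 UTC
 * (1704067200 s) is written by ufshcd_update_rtc() above as
 * 1704067200 - 1262304000 = 441763200 s.
 */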
8326 static int ufs_get_device_desc(struct ufs_hba *hba)
8331 struct ufs_dev_info *dev_info = &hba->dev_info;
8333 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8339 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8340 QUERY_DESC_MAX_SIZE);
8342 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8347 /*
8348 * getting vendor (manufacturerID) and Bank Index in big endian
8349 * format
8350 */
8351 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8352 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8354 /* getting Specification Version in big endian format */
8355 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8356 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8357 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8359 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8361 err = ufshcd_read_string_desc(hba, model_index,
8362 &dev_info->model, SD_ASCII_STD);
8364 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8369 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8370 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8372 ufs_fixup_device_setup(hba);
8374 ufshcd_wb_probe(hba, desc_buf);
8376 ufshcd_temp_notif_probe(hba, desc_buf);
8378 ufs_init_rtc(hba, desc_buf);
8380 if (hba->ext_iid_sup)
8381 ufshcd_ext_iid_probe(hba, desc_buf);
8383 /*
8384 * ufshcd_read_string_desc() returns the size of the string on success;
8385 * reset the error value.
8386 */
8387 err = 0;
8394 static void ufs_put_device_desc(struct ufs_hba *hba)
8396 struct ufs_dev_info *dev_info = &hba->dev_info;
8398 kfree(dev_info->model);
8399 dev_info->model = NULL;
8403 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8404 * @hba: per-adapter instance
8406 * The PA_TActivate parameter can be tuned manually if the UniPro version is
8407 * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
8408 * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
8409 * help reduce the hibern8 exit latency.
8410 *
8411 * Return: zero on success, non-zero error value on failure.
8412 */
8413 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8416 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8418 ret = ufshcd_dme_peer_get(hba,
8420 RX_MIN_ACTIVATETIME_CAPABILITY,
8421 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8422 &peer_rx_min_activatetime);
8426 /* make sure proper unit conversion is applied */
8427 tuned_pa_tactivate =
8428 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8429 / PA_TACTIVATE_TIME_UNIT_US);
8430 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8431 tuned_pa_tactivate);
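/*
 * Unit conversion example (assuming RX_MIN_ACTIVATETIME_UNIT_US = 100 and
 * PA_TACTIVATE_TIME_UNIT_US = 10): a peer RX_MIN_ACTIVATETIME_CAPABILITY
 * of 3 corresponds to 300 us, which is programmed as PA_TACTIVATE = 30.
 */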
8438 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8439 * @hba: per-adapter instance
8441 * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
8442 * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8443 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8444 * This optimal value can help reduce the hibern8 exit latency.
8445 *
8446 * Return: zero on success, non-zero error value on failure.
8447 */
8448 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8451 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8452 u32 max_hibern8_time, tuned_pa_hibern8time;
8454 ret = ufshcd_dme_get(hba,
8455 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8456 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8457 &local_tx_hibern8_time_cap);
8461 ret = ufshcd_dme_peer_get(hba,
8462 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8463 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8464 &peer_rx_hibern8_time_cap);
8468 max_hibern8_time = max(local_tx_hibern8_time_cap,
8469 peer_rx_hibern8_time_cap);
8470 /* make sure proper unit conversion is applied */
8471 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8472 / PA_HIBERN8_TIME_UNIT_US);
8473 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8474 tuned_pa_hibern8time);
8480 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8481 * less than device PA_TACTIVATE time.
8482 * @hba: per-adapter instance
8484 * Some UFS devices require the host PA_TACTIVATE to be lower than the
8485 * device PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs
8486 * to be enabled for such devices.
8487 *
8488 * Return: zero on success, non-zero error value on failure.
8489 */
8490 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8493 u32 granularity, peer_granularity;
8494 u32 pa_tactivate, peer_pa_tactivate;
8495 u32 pa_tactivate_us, peer_pa_tactivate_us;
8496 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8498 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8503 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8508 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8509 (granularity > PA_GRANULARITY_MAX_VAL)) {
8510 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8511 __func__, granularity);
8515 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8516 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8517 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8518 __func__, peer_granularity);
8522 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8526 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8527 &peer_pa_tactivate);
8531 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8532 peer_pa_tactivate_us = peer_pa_tactivate *
8533 gran_to_us_table[peer_granularity - 1];
8535 if (pa_tactivate_us >= peer_pa_tactivate_us) {
8536 u32 new_peer_pa_tactivate;
8538 new_peer_pa_tactivate = pa_tactivate_us /
8539 gran_to_us_table[peer_granularity - 1];
8540 new_peer_pa_tactivate++;
8541 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8542 new_peer_pa_tactivate);
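/*
 * Worked example with gran_to_us_table[] = {1, 4, 8, 16, 32, 100}:
 * host PA_TACTIVATE = 8 at granularity 3 (8 us/unit) is 64 us; device
 * PA_TACTIVATE = 1 at granularity 4 (16 us/unit) is 16 us. Since
 * 64 >= 16, the device value is raised to 64 / 16 + 1 = 5 units (80 us)
 * so the device PA_TACTIVATE strictly exceeds the host's.
 */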
8549 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8551 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8552 ufshcd_tune_pa_tactivate(hba);
8553 ufshcd_tune_pa_hibern8time(hba);
8556 ufshcd_vops_apply_dev_quirks(hba);
8558 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8559 /* set 1ms timeout for PA_TACTIVATE */
8560 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8562 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8563 ufshcd_quirk_tune_host_pa_tactivate(hba);
8566 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8568 hba->ufs_stats.hibern8_exit_cnt = 0;
8569 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8570 hba->req_abort_count = 0;
8573 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8578 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8584 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8585 desc_buf, QUERY_DESC_MAX_SIZE);
8587 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8592 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8593 hba->dev_info.max_lu_supported = 32;
8594 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8595 hba->dev_info.max_lu_supported = 8;
8602 struct ufs_ref_clk {
8603 unsigned long freq_hz;
8604 enum ufs_ref_clk_freq val;
8607 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8608 {19200000, REF_CLK_FREQ_19_2_MHZ},
8609 {26000000, REF_CLK_FREQ_26_MHZ},
8610 {38400000, REF_CLK_FREQ_38_4_MHZ},
8611 {52000000, REF_CLK_FREQ_52_MHZ},
8612 {0, REF_CLK_FREQ_INVAL},
8615 static enum ufs_ref_clk_freq
8616 ufs_get_bref_clk_from_hz(unsigned long freq)
8620 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8621 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8622 return ufs_ref_clk_freqs[i].val;
8624 return REF_CLK_FREQ_INVAL;
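/*
 * Usage sketch for the lookup above (rates taken from ufs_ref_clk_freqs[]):
 *
 *	ufs_get_bref_clk_from_hz(26000000) == REF_CLK_FREQ_26_MHZ
 *	ufs_get_bref_clk_from_hz(24000000) == REF_CLK_FREQ_INVAL
 *
 * Any rate without an exact match in the table is rejected as invalid.
 */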
8627 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8631 freq = clk_get_rate(refclk);
8633 hba->dev_ref_clk_freq =
8634 ufs_get_bref_clk_from_hz(freq);
8636 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8638 "invalid ref_clk setting = %lu\n", freq);
8641 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8645 u32 freq = hba->dev_ref_clk_freq;
8647 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8648 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8651 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8656 if (ref_clk == freq)
8657 goto out; /* nothing to update */
8659 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8660 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8663 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8664 ufs_ref_clk_freqs[freq].freq_hz);
8668 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8669 ufs_ref_clk_freqs[freq].freq_hz);
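/*
 * Note on the messages above: the REF_CLK_FREQ_* enum values are assumed to
 * match the row order of ufs_ref_clk_freqs[], so the same 'freq' value can
 * be written to the bRefClkFreq attribute and used as an index to look up
 * the rate in Hz for logging.
 */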
8675 static int ufshcd_device_params_init(struct ufs_hba *hba)
8680 /* Init UFS geometry descriptor related parameters */
8681 ret = ufshcd_device_geo_params_init(hba);
8685 /* Check and apply UFS device quirks */
8686 ret = ufs_get_device_desc(hba);
8688 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8693 ufshcd_get_ref_clk_gating_wait(hba);
8695 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8696 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8697 hba->dev_info.f_power_on_wp_en = flag;
8699 /* Probe maximum power mode co-supported by both UFS host and device */
8700 if (ufshcd_get_max_pwr_mode(hba))
8702 "%s: Failed getting max supported power mode\n",
8708 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
8711 struct ufs_query_req *request = NULL;
8712 struct ufs_query_res *response = NULL;
8713 struct ufs_dev_info *dev_info = &hba->dev_info;
8714 struct utp_upiu_query_v4_0 *upiu_data;
8716 if (dev_info->wspecversion < 0x400)
8721 mutex_lock(&hba->dev_cmd.lock);
8723 ufshcd_init_query(hba, &request, &response,
8724 UPIU_QUERY_OPCODE_WRITE_ATTR,
8725 QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
8727 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
8729 upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
8731 put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
8733 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
8736 dev_err(hba->dev, "%s: failed to set timestamp %d\n",
8739 mutex_unlock(&hba->dev_cmd.lock);
8740 ufshcd_release(hba);
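/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * timestamp written above travels as a big-endian 64-bit nanosecond count
 * in the OSF3 field of the WRITE_ATTR query UPIU, exactly as
 * put_unaligned_be64() lays it out.
 */
static void __maybe_unused example_pack_timestamp(void *osf3)
{
	/* 0x0102030405060708 is stored as bytes 01 02 03 04 05 06 07 08 */
	put_unaligned_be64(0x0102030405060708ULL, osf3);
}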
8744 * ufshcd_add_lus - probe and add UFS logical units
8745 * @hba: per-adapter instance
8747 * Return: 0 upon success; < 0 upon failure.
8749 static int ufshcd_add_lus(struct ufs_hba *hba)
8753 /* Add required well known logical units to scsi mid layer */
8754 ret = ufshcd_scsi_add_wlus(hba);
8758 /* Initialize devfreq after UFS device is detected */
8759 if (ufshcd_is_clkscaling_supported(hba)) {
8760 memcpy(&hba->clk_scaling.saved_pwr_info,
8762 sizeof(struct ufs_pa_layer_attr));
8763 hba->clk_scaling.is_allowed = true;
8765 ret = ufshcd_devfreq_init(hba);
8769 hba->clk_scaling.is_enabled = true;
8770 ufshcd_init_clk_scaling_sysfs(hba);
8774 scsi_scan_host(hba->host);
8780 /* SDB - Single Doorbell */
8781 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8783 size_t ucdl_size, utrdl_size;
8785 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
8786 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8787 hba->ucdl_dma_addr);
8789 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8790 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8791 hba->utrdl_dma_addr);
8793 devm_kfree(hba->dev, hba->lrb);
8796 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8799 int old_nutrs = hba->nutrs;
8801 ret = ufshcd_mcq_decide_queue_depth(hba);
8806 ret = ufshcd_mcq_init(hba);
8811 * Memory previously allocated for nutrs may not be enough in MCQ mode.
8812 * The number of supported tags in MCQ mode may be larger than in SDB mode.
8814 if (hba->nutrs != old_nutrs) {
8815 ufshcd_release_sdb_queue(hba, old_nutrs);
8816 ret = ufshcd_memory_alloc(hba);
8819 ufshcd_host_memory_configure(hba);
8822 ret = ufshcd_mcq_memory_alloc(hba);
8828 hba->nutrs = old_nutrs;
8832 static void ufshcd_config_mcq(struct ufs_hba *hba)
8837 ret = ufshcd_mcq_vops_config_esi(hba);
8838 dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8840 intrs = UFSHCD_ENABLE_MCQ_INTRS;
8841 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
8842 intrs &= ~MCQ_CQ_EVENT_STATUS;
8843 ufshcd_enable_intr(hba, intrs);
8844 ufshcd_mcq_make_queues_operational(hba);
8845 ufshcd_mcq_config_mac(hba, hba->nutrs);
8847 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8848 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
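	/*
	 * Worked example (assuming UFSHCD_NUM_RESERVED == 1, the slot kept
	 * for device management commands): with nutrs == 64 the SCSI core
	 * may queue up to 63 commands and tag 63 is the reserved slot.
	 */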
8850 ufshcd_mcq_enable(hba);
8851 hba->mcq_enabled = true;
8853 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8854 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8855 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8859 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8862 struct Scsi_Host *host = hba->host;
8864 hba->ufshcd_state = UFSHCD_STATE_RESET;
8866 ret = ufshcd_link_startup(hba);
8870 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8873 /* Debug counters initialization */
8874 ufshcd_clear_dbg_ufs_stats(hba);
8876 /* UniPro link is active now */
8877 ufshcd_set_link_active(hba);
8879 /* Reconfigure MCQ upon reset */
8880 if (is_mcq_enabled(hba) && !init_dev_params)
8881 ufshcd_config_mcq(hba);
8883 /* Verify device initialization by sending NOP OUT UPIU */
8884 ret = ufshcd_verify_dev_init(hba);
8888 /* Initiate UFS initialization and wait until it completes */
8889 ret = ufshcd_complete_dev_init(hba);
8894 * Initialize the UFS device parameters used by the driver; these
8895 * parameters are associated with UFS descriptors.
8897 if (init_dev_params) {
8898 ret = ufshcd_device_params_init(hba);
8901 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
8902 ret = ufshcd_alloc_mcq(hba);
8904 ufshcd_config_mcq(hba);
8906 /* Continue with SDB mode */
8907 use_mcq_mode = false;
8908 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8911 ret = scsi_add_host(host, hba->dev);
8913 dev_err(hba->dev, "scsi_add_host failed\n");
8916 hba->scsi_host_added = true;
8917 } else if (is_mcq_supported(hba)) {
8918 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
8919 ufshcd_config_mcq(hba);
8923 ufshcd_tune_unipro_params(hba);
8925 /* UFS device is also active now */
8926 ufshcd_set_ufs_dev_active(hba);
8927 ufshcd_force_reset_auto_bkops(hba);
8929 ufshcd_set_timestamp_attr(hba);
8930 schedule_delayed_work(&hba->ufs_rtc_update_work,
8931 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
8933 /* Gear up to HS gear if supported */
8934 if (hba->max_pwr_info.is_valid) {
8936 * Set the right value to bRefClkFreq before attempting to
8937 * switch to HS gears.
8939 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8940 ufshcd_set_dev_ref_clk(hba);
8941 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8943 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8953 * ufshcd_probe_hba - probe hba to detect device and initialize it
8954 * @hba: per-adapter instance
8955 * @init_dev_params: whether or not to call ufshcd_device_params_init().
8957 * Execute link-startup and verify device initialization
8959 * Return: 0 upon success; < 0 upon failure.
8961 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8963 ktime_t start = ktime_get();
8964 unsigned long flags;
8967 ret = ufshcd_device_init(hba, init_dev_params);
8971 if (!hba->pm_op_in_progress &&
8972 (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
8973 /* Reset the device and controller before doing reinit */
8974 ufshcd_device_reset(hba);
8975 ufshcd_hba_stop(hba);
8976 ufshcd_vops_reinit_notify(hba);
8977 ret = ufshcd_hba_enable(hba);
8979 dev_err(hba->dev, "Host controller enable failed\n");
8980 ufshcd_print_evt_hist(hba);
8981 ufshcd_print_host_state(hba);
8985 /* Reinit the device */
8986 ret = ufshcd_device_init(hba, init_dev_params);
8991 ufshcd_print_pwr_info(hba);
8994 * bActiveICCLevel is volatile for the UFS device (per the latest v2.1 spec)
8995 * and for removable UFS cards as well, hence always set the parameter.
8996 * Note: the error handler may issue a device reset, which also resets
8997 * bActiveICCLevel, so it is always safe to set this here.
8999 ufshcd_set_active_icc_lvl(hba);
9001 /* Enable UFS Write Booster if supported */
9002 ufshcd_configure_wb(hba);
9004 if (hba->ee_usr_mask)
9005 ufshcd_write_ee_control(hba);
9006 ufshcd_configure_auto_hibern8(hba);
9009 spin_lock_irqsave(hba->host->host_lock, flags);
9011 hba->ufshcd_state = UFSHCD_STATE_ERROR;
9012 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
9013 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
9014 spin_unlock_irqrestore(hba->host->host_lock, flags);
9016 trace_ufshcd_init(dev_name(hba->dev), ret,
9017 ktime_to_us(ktime_sub(ktime_get(), start)),
9018 hba->curr_dev_pwr_mode, hba->uic_link_state);
9023 * ufshcd_async_scan - asynchronous execution for probing hba
9024 * @data: data pointer to pass to this function
9025 * @cookie: cookie data
9027 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
9029 struct ufs_hba *hba = (struct ufs_hba *)data;
9032 down(&hba->host_sem);
9033 /* Initialize hba, detect and initialize UFS device */
9034 ret = ufshcd_probe_hba(hba, true);
9039 /* Probe and add UFS logical units */
9040 ret = ufshcd_add_lus(hba);
9043 pm_runtime_put_sync(hba->dev);
9046 dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
9049 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
9051 struct ufs_hba *hba = shost_priv(scmd->device->host);
9053 if (!hba->system_suspending) {
9054 /* Activate the error handler in the SCSI core. */
9055 return SCSI_EH_NOT_HANDLED;
9059 * If we get here we know that no TMFs are outstanding and also that
9060 * the only pending command is a START STOP UNIT command. Handle the
9061 * timeout of that command directly to prevent a deadlock between
9062 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
9064 ufshcd_link_recovery(hba);
9065 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
9066 __func__, hba->outstanding_tasks);
9068 return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
9071 static const struct attribute_group *ufshcd_driver_groups[] = {
9072 &ufs_sysfs_unit_descriptor_group,
9073 &ufs_sysfs_lun_attributes_group,
9077 static struct ufs_hba_variant_params ufs_hba_vps = {
9078 .hba_enable_delay_us = 1000,
9079 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
9080 .devfreq_profile.polling_ms = 100,
9081 .devfreq_profile.target = ufshcd_devfreq_target,
9082 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
9083 .ondemand_data.upthreshold = 70,
9084 .ondemand_data.downdifferential = 5,
9087 static const struct scsi_host_template ufshcd_driver_template = {
9088 .module = THIS_MODULE,
9090 .proc_name = UFSHCD,
9091 .map_queues = ufshcd_map_queues,
9092 .queuecommand = ufshcd_queuecommand,
9093 .mq_poll = ufshcd_poll,
9094 .slave_alloc = ufshcd_slave_alloc,
9095 .slave_configure = ufshcd_slave_configure,
9096 .slave_destroy = ufshcd_slave_destroy,
9097 .change_queue_depth = ufshcd_change_queue_depth,
9098 .eh_abort_handler = ufshcd_abort,
9099 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
9100 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
9101 .eh_timed_out = ufshcd_eh_timed_out,
9103 .sg_tablesize = SG_ALL,
9104 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
9105 .can_queue = UFSHCD_CAN_QUEUE,
9106 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
9107 .max_sectors = SZ_1M / SECTOR_SIZE,
9108 .max_host_blocked = 1,
9109 .track_queue_depth = 1,
9110 .skip_settle_delay = 1,
9111 .sdev_groups = ufshcd_driver_groups,
9114 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
9123 * The "set_load" operation is required only on those regulators
9124 * which have a specifically configured current limitation. Otherwise
9125 * a zero max_uA may cause unexpected behavior when the regulator is
9126 * enabled or set to high power mode.
9131 ret = regulator_set_load(vreg->reg, ua);
9133 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
9134 __func__, vreg->name, ua, ret);
9140 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
9141 struct ufs_vreg *vreg)
9143 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
9146 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
9147 struct ufs_vreg *vreg)
9152 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
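/*
 * Illustrative sketch (hypothetical helper mirroring the two wrappers
 * above): LPM requests the fixed low-power load UFS_VREG_LPM_LOAD_UA,
 * while HPM restores the regulator's configured maximum current.
 */
static int __maybe_unused example_set_vreg_load(struct ufs_hba *hba,
						struct ufs_vreg *vreg,
						bool high_power)
{
	return ufshcd_config_vreg_load(hba->dev, vreg,
				       high_power ? vreg->max_uA :
						    UFS_VREG_LPM_LOAD_UA);
}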
9155 static int ufshcd_config_vreg(struct device *dev,
9156 struct ufs_vreg *vreg, bool on)
9158 if (regulator_count_voltages(vreg->reg) <= 0)
9161 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
9164 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
9168 if (!vreg || vreg->enabled)
9171 ret = ufshcd_config_vreg(dev, vreg, true);
9173 ret = regulator_enable(vreg->reg);
9176 vreg->enabled = true;
9178 dev_err(dev, "%s: %s enable failed, err=%d\n",
9179 __func__, vreg->name, ret);
9184 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
9188 if (!vreg || !vreg->enabled || vreg->always_on)
9191 ret = regulator_disable(vreg->reg);
9194 /* ignore errors on applying disable config */
9195 ufshcd_config_vreg(dev, vreg, false);
9196 vreg->enabled = false;
9198 dev_err(dev, "%s: %s disable failed, err=%d\n",
9199 __func__, vreg->name, ret);
9205 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
9208 struct device *dev = hba->dev;
9209 struct ufs_vreg_info *info = &hba->vreg_info;
9211 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
9215 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
9219 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
9223 ufshcd_toggle_vreg(dev, info->vccq2, false);
9224 ufshcd_toggle_vreg(dev, info->vccq, false);
9225 ufshcd_toggle_vreg(dev, info->vcc, false);
9230 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
9232 struct ufs_vreg_info *info = &hba->vreg_info;
9234 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
9237 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
9244 vreg->reg = devm_regulator_get(dev, vreg->name);
9245 if (IS_ERR(vreg->reg)) {
9246 ret = PTR_ERR(vreg->reg);
9247 dev_err(dev, "%s: %s get failed, err=%d\n",
9248 __func__, vreg->name, ret);
9253 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
9255 static int ufshcd_init_vreg(struct ufs_hba *hba)
9258 struct device *dev = hba->dev;
9259 struct ufs_vreg_info *info = &hba->vreg_info;
9261 ret = ufshcd_get_vreg(dev, info->vcc);
9265 ret = ufshcd_get_vreg(dev, info->vccq);
9267 ret = ufshcd_get_vreg(dev, info->vccq2);
9272 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
9274 struct ufs_vreg_info *info = &hba->vreg_info;
9276 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
9279 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
9282 struct ufs_clk_info *clki;
9283 struct list_head *head = &hba->clk_list_head;
9284 unsigned long flags;
9285 ktime_t start = ktime_get();
9286 bool clk_state_changed = false;
9288 if (list_empty(head))
9291 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
9295 list_for_each_entry(clki, head, list) {
9296 if (!IS_ERR_OR_NULL(clki->clk)) {
9298 * Don't disable clocks which are needed
9299 * to keep the link active.
9301 if (ufshcd_is_link_active(hba) &&
9302 clki->keep_link_active)
9305 clk_state_changed = on ^ clki->enabled;
9306 if (on && !clki->enabled) {
9307 ret = clk_prepare_enable(clki->clk);
9309 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
9310 __func__, clki->name, ret);
9313 } else if (!on && clki->enabled) {
9314 clk_disable_unprepare(clki->clk);
9317 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
9318 clki->name, on ? "en" : "dis");
9322 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
9326 if (!ufshcd_is_clkscaling_supported(hba))
9327 ufshcd_pm_qos_update(hba, on);
9330 list_for_each_entry(clki, head, list) {
9331 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
9332 clk_disable_unprepare(clki->clk);
9334 } else if (!ret && on) {
9335 spin_lock_irqsave(hba->host->host_lock, flags);
9336 hba->clk_gating.state = CLKS_ON;
9337 trace_ufshcd_clk_gating(dev_name(hba->dev),
9338 hba->clk_gating.state);
9339 spin_unlock_irqrestore(hba->host->host_lock, flags);
9342 if (clk_state_changed)
9343 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
9344 (on ? "on" : "off"),
9345 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9349 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9352 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9355 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9356 return REF_CLK_FREQ_INVAL;
9359 return ufs_get_bref_clk_from_hz(freq);
9362 static int ufshcd_init_clocks(struct ufs_hba *hba)
9365 struct ufs_clk_info *clki;
9366 struct device *dev = hba->dev;
9367 struct list_head *head = &hba->clk_list_head;
9369 if (list_empty(head))
9372 list_for_each_entry(clki, head, list) {
9376 clki->clk = devm_clk_get(dev, clki->name);
9377 if (IS_ERR(clki->clk)) {
9378 ret = PTR_ERR(clki->clk);
9379 dev_err(dev, "%s: %s clk get failed, %d\n",
9380 __func__, clki->name, ret);
9385 * Parse device ref clk freq as per device tree "ref_clk".
9386 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9387 * in ufshcd_alloc_host().
9389 if (!strcmp(clki->name, "ref_clk"))
9390 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9392 if (clki->max_freq) {
9393 ret = clk_set_rate(clki->clk, clki->max_freq);
9395 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9396 __func__, clki->name,
9397 clki->max_freq, ret);
9400 clki->curr_freq = clki->max_freq;
9402 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9403 clki->name, clk_get_rate(clki->clk));
9406 /* Set Max. frequency for all clocks */
9407 if (hba->use_pm_opp) {
9408 ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
9410 dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
9420 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9427 err = ufshcd_vops_init(hba);
9429 dev_err_probe(hba->dev, err,
9430 "%s: variant %s init failed with err %d\n",
9431 __func__, ufshcd_get_var_name(hba), err);
9436 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9441 ufshcd_vops_exit(hba);
9444 static int ufshcd_hba_init(struct ufs_hba *hba)
9449 * Handle host controller power separately from the UFS device power
9450 * rails, as this makes it easy to control host controller power
9451 * collapse, which is different from UFS device power collapse.
9452 * Also, enable the host controller power before going ahead with the
9453 * rest of the initialization here.
9455 err = ufshcd_init_hba_vreg(hba);
9459 err = ufshcd_setup_hba_vreg(hba, true);
9463 err = ufshcd_init_clocks(hba);
9465 goto out_disable_hba_vreg;
9467 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9468 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9470 err = ufshcd_setup_clocks(hba, true);
9472 goto out_disable_hba_vreg;
9474 err = ufshcd_init_vreg(hba);
9476 goto out_disable_clks;
9478 err = ufshcd_setup_vreg(hba, true);
9480 goto out_disable_clks;
9482 err = ufshcd_variant_hba_init(hba);
9484 goto out_disable_vreg;
9486 ufs_debugfs_hba_init(hba);
9487 ufs_fault_inject_hba_init(hba);
9489 hba->is_powered = true;
9493 ufshcd_setup_vreg(hba, false);
9495 ufshcd_setup_clocks(hba, false);
9496 out_disable_hba_vreg:
9497 ufshcd_setup_hba_vreg(hba, false);
9502 static void ufshcd_hba_exit(struct ufs_hba *hba)
9504 if (hba->is_powered) {
9505 ufshcd_pm_qos_exit(hba);
9506 ufshcd_exit_clk_scaling(hba);
9507 ufshcd_exit_clk_gating(hba);
9509 destroy_workqueue(hba->eh_wq);
9510 ufs_debugfs_hba_exit(hba);
9511 ufshcd_variant_hba_exit(hba);
9512 ufshcd_setup_vreg(hba, false);
9513 ufshcd_setup_clocks(hba, false);
9514 ufshcd_setup_hba_vreg(hba, false);
9515 hba->is_powered = false;
9516 ufs_put_device_desc(hba);
9520 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9521 enum ufs_dev_pwr_mode pwr_mode,
9522 struct scsi_sense_hdr *sshdr)
9524 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
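	/*
	 * Worked example: START STOP UNIT carries the target power state in
	 * the POWER CONDITION field, bits 7:4 of CDB byte 4, hence the
	 * "pwr_mode << 4" above; e.g. pwr_mode == 2 encodes as byte 0x20.
	 */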
9525 struct scsi_failure failure_defs[] = {
9528 .result = SCMD_FAILURE_RESULT_ANY,
9531 struct scsi_failures failures = {
9532 .failure_definitions = failure_defs,
9534 const struct scsi_exec_args args = {
9535 .failures = &failures,
9537 .req_flags = BLK_MQ_REQ_PM,
9538 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9541 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9542 /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
9547 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device power mode
9549 * @hba: per adapter instance
9550 * @pwr_mode: device power mode to set
9552 * Return: 0 if requested power mode is set successfully;
9553 * < 0 if failed to set the requested power mode.
9555 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9556 enum ufs_dev_pwr_mode pwr_mode)
9558 struct scsi_sense_hdr sshdr;
9559 struct scsi_device *sdp;
9560 unsigned long flags;
9563 spin_lock_irqsave(hba->host->host_lock, flags);
9564 sdp = hba->ufs_device_wlun;
9565 if (sdp && scsi_device_online(sdp))
9566 ret = scsi_device_get(sdp);
9569 spin_unlock_irqrestore(hba->host->host_lock, flags);
9575 * If SCSI commands fail, the SCSI mid-layer schedules SCSI error
9576 * handling, which would wait for the host to be resumed. Since we know
9577 * we are functional while we are here, skip host resume in the error
9578 * handling context.
9580 hba->host->eh_noresume = 1;
9583 * This function is generally called from the power management
9584 * callbacks, hence set the RQF_PM flag so that it doesn't resume
9585 * already-suspended children.
9587 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9589 sdev_printk(KERN_WARNING, sdp,
9590 "START_STOP failed for power mode: %d, result %x\n",
9593 if (scsi_sense_valid(&sshdr))
9594 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9598 hba->curr_dev_pwr_mode = pwr_mode;
9601 scsi_device_put(sdp);
9602 hba->host->eh_noresume = 0;
9606 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9607 enum uic_link_state req_link_state,
9608 bool check_for_bkops)
9612 if (req_link_state == hba->uic_link_state)
9615 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9616 ret = ufshcd_uic_hibern8_enter(hba);
9618 ufshcd_set_link_hibern8(hba);
9620 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9626 * If autobkops is enabled, link can't be turned off because
9627 * turning off the link would also turn off the device, except in the
9628 * case of DeepSleep where the device is expected to remain powered.
9630 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9631 (!check_for_bkops || !hba->auto_bkops_enabled)) {
9633 * Make sure that the link is in low power mode; currently this is
9634 * done by putting the link in Hibern8. Another way to put the link
9635 * in low power mode is to send a DME endpoint reset to the device
9636 * and then send the DME reset command to the local UniPro. But
9637 * putting the link in Hibern8 is much faster.
9639 * Note also that putting the link in Hibern8 is a requirement
9640 * for entering DeepSleep.
9642 ret = ufshcd_uic_hibern8_enter(hba);
9644 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9649 * Change controller state to "reset state" which
9650 * should also put the link in off/reset state
9652 ufshcd_hba_stop(hba);
9654 * TODO: Check if we need any delay to make sure that
9655 * controller is reset
9657 ufshcd_set_link_off(hba);
9664 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9666 bool vcc_off = false;
9669 * It seems some UFS devices may keep drawing more than the sleep current
9670 * (at least for 500 us) from the UFS rails (especially from the VCCQ rail).
9671 * To avoid this situation, add a 2 ms delay before putting these UFS
9672 * rails in LPM mode.
9674 if (!ufshcd_is_link_active(hba) &&
9675 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9676 usleep_range(2000, 2100);
9679 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save power.
9682 * If the UFS device and link are in the OFF state, all power supplies
9683 * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is not
9684 * required. If the UFS link is inactive (Hibern8 or OFF state) and the
9685 * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
9687 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9688 * in low power state which would save some power.
9690 * If Write Booster is enabled and the device needs to flush the WB
9691 * buffer OR if bkops status is urgent for WB, keep Vcc on.
9693 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9694 !hba->dev_info.is_lu_power_on_wp) {
9695 ufshcd_setup_vreg(hba, false);
9697 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9698 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9700 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9701 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9702 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9707 * Some UFS devices require delay after VCC power rail is turned-off.
9709 if (vcc_off && hba->vreg_info.vcc &&
9710 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9711 usleep_range(5000, 5100);
9715 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9719 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9720 !hba->dev_info.is_lu_power_on_wp) {
9721 ret = ufshcd_setup_vreg(hba, true);
9722 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9723 if (!ufshcd_is_link_active(hba)) {
9724 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9727 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9731 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9736 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9738 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9742 #endif /* CONFIG_PM */
9744 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9746 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9747 ufshcd_setup_hba_vreg(hba, false);
9750 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9752 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9753 ufshcd_setup_hba_vreg(hba, true);
9756 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9759 bool check_for_bkops;
9760 enum ufs_pm_level pm_lvl;
9761 enum ufs_dev_pwr_mode req_dev_pwr_mode;
9762 enum uic_link_state req_link_state;
9764 hba->pm_op_in_progress = true;
9765 if (pm_op != UFS_SHUTDOWN_PM) {
9766 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9767 hba->rpm_lvl : hba->spm_lvl;
9768 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9769 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9771 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9772 req_link_state = UIC_LINK_OFF_STATE;
9776 * If we can't transition into any of the low power modes
9777 * just gate the clocks.
9780 hba->clk_gating.is_suspended = true;
9782 if (ufshcd_is_clkscaling_supported(hba))
9783 ufshcd_clk_scaling_suspend(hba, true);
9785 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9786 req_link_state == UIC_LINK_ACTIVE_STATE) {
9790 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9791 (req_link_state == hba->uic_link_state))
9792 goto enable_scaling;
9794 /* UFS device & link must be active before we enter this function */
9795 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9796 /* Wait for the error handler to finish or trigger error recovery */
9797 if (!ufshcd_eh_in_progress(hba))
9798 ufshcd_force_error_recovery(hba);
9800 goto enable_scaling;
9803 if (pm_op == UFS_RUNTIME_PM) {
9804 if (ufshcd_can_autobkops_during_suspend(hba)) {
9806 * The device is idle with no requests in the queue, so
9807 * allow background operations if the bkops status shows
9808 * that performance might be impacted.
9810 ret = ufshcd_urgent_bkops(hba);
9813 * If an error is returned in the suspend flow, IO will hang.
9814 * Trigger the error handler and break suspend for error recovery.
9817 ufshcd_force_error_recovery(hba);
9819 goto enable_scaling;
9822 /* make sure that auto bkops is disabled */
9823 ufshcd_disable_auto_bkops(hba);
9826 * If the device needs to do BKOPS or a WB buffer flush during
9827 * Hibern8, keep the device power mode as "active power mode".
9830 hba->dev_info.b_rpm_dev_flush_capable =
9831 hba->auto_bkops_enabled ||
9832 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9833 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9834 ufshcd_is_auto_hibern8_enabled(hba))) &&
9835 ufshcd_wb_need_flush(hba));
9838 flush_work(&hba->eeh_work);
9840 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9842 goto enable_scaling;
9844 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9845 if (pm_op != UFS_RUNTIME_PM)
9846 /* ensure that bkops is disabled */
9847 ufshcd_disable_auto_bkops(hba);
9849 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9850 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9851 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9853 * If an error is returned in the suspend flow, IO will hang.
9854 * Trigger the error handler and break suspend for error recovery.
9857 ufshcd_force_error_recovery(hba);
9861 goto enable_scaling;
9866 * In the case of DeepSleep, the device is expected to remain powered
9867 * with the link off, so do not check for bkops.
9869 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9870 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9871 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9873 * If an error is returned in the suspend flow, IO will hang.
9874 * Trigger the error handler and break suspend for error recovery.
9877 ufshcd_force_error_recovery(hba);
9881 goto set_dev_active;
9885 * Call the vendor specific suspend callback. As these callbacks may
9886 * access vendor specific host controller register space, call them
9887 * while the host clocks are still on.
9889 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9891 goto set_link_active;
9893 cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
9898 * Device hardware reset is required to exit DeepSleep. Also, for
9899 * DeepSleep, the link is off, so host reset and restore will be done further below.
9902 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9903 ufshcd_device_reset(hba);
9904 WARN_ON(!ufshcd_is_link_off(hba));
9906 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9907 ufshcd_set_link_active(hba);
9908 else if (ufshcd_is_link_off(hba))
9909 ufshcd_host_reset_and_restore(hba);
9911 /* Can also get here needing to exit DeepSleep */
9912 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9913 ufshcd_device_reset(hba);
9914 ufshcd_host_reset_and_restore(hba);
9916 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9917 ufshcd_disable_auto_bkops(hba);
9919 if (ufshcd_is_clkscaling_supported(hba))
9920 ufshcd_clk_scaling_suspend(hba, false);
9922 hba->dev_info.b_rpm_dev_flush_capable = false;
9924 if (hba->dev_info.b_rpm_dev_flush_capable) {
9925 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9926 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9930 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9931 hba->clk_gating.is_suspended = false;
9932 ufshcd_release(hba);
9934 hba->pm_op_in_progress = false;
9939 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9942 enum uic_link_state old_link_state = hba->uic_link_state;
9944 hba->pm_op_in_progress = true;
9947 * Call vendor specific resume callback. As these callbacks may access
9948 * vendor specific host controller register space, call them when the
9949 * host clocks are ON.
9951 ret = ufshcd_vops_resume(hba, pm_op);
9955 /* For DeepSleep, the only supported option is to have the link off */
9956 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9958 if (ufshcd_is_link_hibern8(hba)) {
9959 ret = ufshcd_uic_hibern8_exit(hba);
9961 ufshcd_set_link_active(hba);
9963 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9965 goto vendor_suspend;
9967 } else if (ufshcd_is_link_off(hba)) {
9969 * A full initialization of the host and the device is
9970 * required since the link was put to off during suspend.
9971 * Note, in the case of DeepSleep, the device will exit
9972 * DeepSleep due to device reset.
9974 ret = ufshcd_reset_and_restore(hba);
9976 * ufshcd_reset_and_restore() should have already
9977 * set the link state as active
9979 if (ret || !ufshcd_is_link_active(hba))
9980 goto vendor_suspend;
9983 if (!ufshcd_is_ufs_dev_active(hba)) {
9984 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9986 goto set_old_link_state;
9987 ufshcd_set_timestamp_attr(hba);
9988 schedule_delayed_work(&hba->ufs_rtc_update_work,
9989 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
9992 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9993 ufshcd_enable_auto_bkops(hba);
9996 * If BKOPS operations are urgently needed at this moment, then
9997 * keep auto-bkops enabled; otherwise disable it.
9999 ufshcd_urgent_bkops(hba);
10001 if (hba->ee_usr_mask)
10002 ufshcd_write_ee_control(hba);
10004 if (ufshcd_is_clkscaling_supported(hba))
10005 ufshcd_clk_scaling_suspend(hba, false);
10007 if (hba->dev_info.b_rpm_dev_flush_capable) {
10008 hba->dev_info.b_rpm_dev_flush_capable = false;
10009 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
10012 ufshcd_configure_auto_hibern8(hba);
10016 set_old_link_state:
10017 ufshcd_link_state_transition(hba, old_link_state, 0);
10019 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
10020 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
10023 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
10024 hba->clk_gating.is_suspended = false;
10025 ufshcd_release(hba);
10026 hba->pm_op_in_progress = false;
10030 static int ufshcd_wl_runtime_suspend(struct device *dev)
10032 struct scsi_device *sdev = to_scsi_device(dev);
10033 struct ufs_hba *hba;
10035 ktime_t start = ktime_get();
10037 hba = shost_priv(sdev->host);
10039 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
10041 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10043 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
10044 ktime_to_us(ktime_sub(ktime_get(), start)),
10045 hba->curr_dev_pwr_mode, hba->uic_link_state);
10050 static int ufshcd_wl_runtime_resume(struct device *dev)
10052 struct scsi_device *sdev = to_scsi_device(dev);
10053 struct ufs_hba *hba;
10055 ktime_t start = ktime_get();
10057 hba = shost_priv(sdev->host);
10059 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
10061 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10063 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
10064 ktime_to_us(ktime_sub(ktime_get(), start)),
10065 hba->curr_dev_pwr_mode, hba->uic_link_state);
10071 #ifdef CONFIG_PM_SLEEP
10072 static int ufshcd_wl_suspend(struct device *dev)
10074 struct scsi_device *sdev = to_scsi_device(dev);
10075 struct ufs_hba *hba;
10077 ktime_t start = ktime_get();
10079 hba = shost_priv(sdev->host);
10080 down(&hba->host_sem);
10081 hba->system_suspending = true;
10083 if (pm_runtime_suspended(dev))
10086 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
10088 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10089 up(&hba->host_sem);
10094 hba->is_sys_suspended = true;
10095 trace_ufshcd_wl_suspend(dev_name(dev), ret,
10096 ktime_to_us(ktime_sub(ktime_get(), start)),
10097 hba->curr_dev_pwr_mode, hba->uic_link_state);
10102 static int ufshcd_wl_resume(struct device *dev)
10104 struct scsi_device *sdev = to_scsi_device(dev);
10105 struct ufs_hba *hba;
10107 ktime_t start = ktime_get();
10109 hba = shost_priv(sdev->host);
10111 if (pm_runtime_suspended(dev))
10114 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
10116 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10118 trace_ufshcd_wl_resume(dev_name(dev), ret,
10119 ktime_to_us(ktime_sub(ktime_get(), start)),
10120 hba->curr_dev_pwr_mode, hba->uic_link_state);
10122 hba->is_sys_suspended = false;
10123 hba->system_suspending = false;
10124 up(&hba->host_sem);
10130 * ufshcd_suspend - helper function for suspend operations
10131 * @hba: per adapter instance
10133 * This function disables irqs, turns off clocks,
10134 * and puts vreg and hba-vreg into LPM mode.
10136 * Return: 0 upon success; < 0 upon failure.
10138 static int ufshcd_suspend(struct ufs_hba *hba)
10142 if (!hba->is_powered)
10145 * Disable the host irq, as there won't be any
10146 * host controller transactions expected till resume.
10148 ufshcd_disable_irq(hba);
10149 ret = ufshcd_setup_clocks(hba, false);
10151 ufshcd_enable_irq(hba);
10154 if (ufshcd_is_clkgating_allowed(hba)) {
10155 hba->clk_gating.state = CLKS_OFF;
10156 trace_ufshcd_clk_gating(dev_name(hba->dev),
10157 hba->clk_gating.state);
10160 ufshcd_vreg_set_lpm(hba);
10161 /* Put the host controller in low power mode if possible */
10162 ufshcd_hba_vreg_set_lpm(hba);
10163 ufshcd_pm_qos_update(hba, false);
10169 * ufshcd_resume - helper function for resume operations
10170 * @hba: per adapter instance
10172 * This function basically turns on the regulators, clocks and irqs of the hba.
10175 * Return: 0 for success and non-zero for failure.
10177 static int ufshcd_resume(struct ufs_hba *hba)
10181 if (!hba->is_powered)
10184 ufshcd_hba_vreg_set_hpm(hba);
10185 ret = ufshcd_vreg_set_hpm(hba);
10189 /* Make sure clocks are enabled before accessing controller */
10190 ret = ufshcd_setup_clocks(hba, true);
10194 /* enable the host irq as host controller would be active soon */
10195 ufshcd_enable_irq(hba);
10200 ufshcd_vreg_set_lpm(hba);
10203 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
10206 #endif /* CONFIG_PM */
10208 #ifdef CONFIG_PM_SLEEP
10210 * ufshcd_system_suspend - system suspend callback
10211 * @dev: Device associated with the UFS controller.
10213 * Executed before putting the system into a sleep state in which the contents
10214 * of main memory are preserved.
10216 * Return: 0 for success and non-zero for failure.
10218 int ufshcd_system_suspend(struct device *dev)
10220 struct ufs_hba *hba = dev_get_drvdata(dev);
10222 ktime_t start = ktime_get();
10224 if (pm_runtime_suspended(hba->dev))
10227 ret = ufshcd_suspend(hba);
10229 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
10230 ktime_to_us(ktime_sub(ktime_get(), start)),
10231 hba->curr_dev_pwr_mode, hba->uic_link_state);
10234 EXPORT_SYMBOL(ufshcd_system_suspend);
10237 * ufshcd_system_resume - system resume callback
10238 * @dev: Device associated with the UFS controller.
10240 * Executed after waking the system up from a sleep state in which the contents
10241 * of main memory were preserved.
10243 * Return: 0 for success and non-zero for failure.
10245 int ufshcd_system_resume(struct device *dev)
10247 struct ufs_hba *hba = dev_get_drvdata(dev);
10248 ktime_t start = ktime_get();
10251 if (pm_runtime_suspended(hba->dev))
10254 ret = ufshcd_resume(hba);
10257 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
10258 ktime_to_us(ktime_sub(ktime_get(), start)),
10259 hba->curr_dev_pwr_mode, hba->uic_link_state);
10263 EXPORT_SYMBOL(ufshcd_system_resume);
10264 #endif /* CONFIG_PM_SLEEP */
10268 * ufshcd_runtime_suspend - runtime suspend callback
10269 * @dev: Device associated with the UFS controller.
10271 * Check the description of ufshcd_suspend() function for more details.
10273 * Return: 0 for success and non-zero for failure.
10275 int ufshcd_runtime_suspend(struct device *dev)
10277 struct ufs_hba *hba = dev_get_drvdata(dev);
10279 ktime_t start = ktime_get();
10281 ret = ufshcd_suspend(hba);
10283 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
10284 ktime_to_us(ktime_sub(ktime_get(), start)),
10285 hba->curr_dev_pwr_mode, hba->uic_link_state);
10288 EXPORT_SYMBOL(ufshcd_runtime_suspend);
10291 * ufshcd_runtime_resume - runtime resume routine
10292 * @dev: Device associated with the UFS controller.
10294 * This function basically brings the controller
10295 * to the active state. The following operations are done in this function:
10297 * 1. Turn on all the controller related clocks
10298 * 2. Turn ON VCC rail
10300 * Return: 0 upon success; < 0 upon failure.
10302 int ufshcd_runtime_resume(struct device *dev)
10304 struct ufs_hba *hba = dev_get_drvdata(dev);
10306 ktime_t start = ktime_get();
10308 ret = ufshcd_resume(hba);
10310 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
10311 ktime_to_us(ktime_sub(ktime_get(), start)),
10312 hba->curr_dev_pwr_mode, hba->uic_link_state);
10315 EXPORT_SYMBOL(ufshcd_runtime_resume);
10316 #endif /* CONFIG_PM */
10318 static void ufshcd_wl_shutdown(struct device *dev)
10320 struct scsi_device *sdev = to_scsi_device(dev);
10321 struct ufs_hba *hba = shost_priv(sdev->host);
10323 down(&hba->host_sem);
10324 hba->shutting_down = true;
10325 up(&hba->host_sem);
10327 /* Turn on everything while shutting down */
10328 ufshcd_rpm_get_sync(hba);
10329 scsi_device_quiesce(sdev);
10330 shost_for_each_device(sdev, hba->host) {
10331 if (sdev == hba->ufs_device_wlun)
10333 scsi_device_quiesce(sdev);
10335 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10338 * Next, turn off the UFS controller and the UFS regulators. Disable the UFS clocks as well.
10341 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
10342 ufshcd_suspend(hba);
10344 hba->is_powered = false;
10348 * ufshcd_remove - de-allocate the SCSI host and the host memory space
10349 * data structures
10350 * @hba: per adapter instance
10352 void ufshcd_remove(struct ufs_hba *hba)
10354 if (hba->ufs_device_wlun)
10355 ufshcd_rpm_get_sync(hba);
10356 ufs_hwmon_remove(hba);
10357 ufs_bsg_remove(hba);
10358 ufs_sysfs_remove_nodes(hba->dev);
10359 blk_mq_destroy_queue(hba->tmf_queue);
10360 blk_put_queue(hba->tmf_queue);
10361 blk_mq_free_tag_set(&hba->tmf_tag_set);
10362 scsi_remove_host(hba->host);
10363 /* disable interrupts */
10364 ufshcd_disable_intr(hba, hba->intr_mask);
10365 ufshcd_hba_stop(hba);
10366 ufshcd_hba_exit(hba);
10368 EXPORT_SYMBOL_GPL(ufshcd_remove);
10370 #ifdef CONFIG_PM_SLEEP
10371 int ufshcd_system_freeze(struct device *dev)
10374 return ufshcd_system_suspend(dev);
10377 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10379 int ufshcd_system_restore(struct device *dev)
10382 struct ufs_hba *hba = dev_get_drvdata(dev);
10385 ret = ufshcd_system_resume(dev);
10389 /* Configure UTRL and UTMRL base address registers */
10390 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10391 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10392 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10393 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10394 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10395 REG_UTP_TASK_REQ_LIST_BASE_L);
10396 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10397 REG_UTP_TASK_REQ_LIST_BASE_H);
10399 * Make sure that the UTRL and UTMRL base address registers
10400 * are updated with the latest queue addresses. Only after
10401 * these addresses are updated can we queue new commands.
10405 /* Resuming from hibernate, assume that link was OFF */
10406 ufshcd_set_link_off(hba);
10411 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
10413 int ufshcd_system_thaw(struct device *dev)
10415 return ufshcd_system_resume(dev);
10417 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10418 #endif /* CONFIG_PM_SLEEP */
10421 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10422 * @hba: pointer to Host Bus Adapter (HBA)
10424 void ufshcd_dealloc_host(struct ufs_hba *hba)
10426 scsi_host_put(hba->host);
10428 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
10431 * ufshcd_set_dma_mask - Set dma mask based on the controller
10432 * addressing capability
10433 * @hba: per adapter instance
10435 * Return: 0 for success, non-zero for failure.
10437 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10439 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10440 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10443 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10447 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10448 * @dev: pointer to device handle
10449 * @hba_handle: driver private handle
10451 * Return: 0 on success, non-zero value on failure.
10453 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10455 struct Scsi_Host *host;
10456 struct ufs_hba *hba;
10461 "Invalid memory reference: dev is NULL\n");
10466 host = scsi_host_alloc(&ufshcd_driver_template,
10467 sizeof(struct ufs_hba));
10469 dev_err(dev, "scsi_host_alloc failed\n");
10473 host->nr_maps = HCTX_TYPE_POLL + 1;
10474 hba = shost_priv(host);
10477 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10478 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10479 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10480 INIT_LIST_HEAD(&hba->clk_list_head);
10481 spin_lock_init(&hba->outstanding_lock);
10488 EXPORT_SYMBOL(ufshcd_alloc_host);
10490 /* This function exists because blk_mq_alloc_tag_set() requires a queue_rq callback. */
10491 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10492 const struct blk_mq_queue_data *qd)
10494 WARN_ON_ONCE(true);
10495 return BLK_STS_NOTSUPP;
10498 static const struct blk_mq_ops ufshcd_tmf_ops = {
10499 .queue_rq = ufshcd_queue_tmf,
10503 * ufshcd_init - Driver initialization routine
10504 * @hba: per-adapter instance
10505 * @mmio_base: base register address
10506 * @irq: Interrupt line of device
10508 * Return: 0 on success, non-zero value on failure.
10510 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10513 struct Scsi_Host *host = hba->host;
10514 struct device *dev = hba->dev;
10515 char eh_wq_name[sizeof("ufs_eh_wq_00")];
10518 * dev_set_drvdata() must be called before any callbacks are registered
10519 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, sysfs).
10522 dev_set_drvdata(dev, hba);
10526 "Invalid memory reference: mmio_base is NULL\n");
10531 hba->mmio_base = mmio_base;
10533 hba->vps = &ufs_hba_vps;
10535 err = ufshcd_hba_init(hba);
10539 /* Read capabilities registers */
10540 err = ufshcd_hba_capabilities(hba);
10544 /* Get UFS version supported by the controller */
10545 hba->ufs_version = ufshcd_get_ufs_version(hba);
10547 /* Get Interrupt bit mask per version */
10548 hba->intr_mask = ufshcd_get_intr_mask(hba);
10550 err = ufshcd_set_dma_mask(hba);
10552 dev_err(hba->dev, "set dma mask failed\n");
10556 /* Allocate memory for host memory space */
10557 err = ufshcd_memory_alloc(hba);
10559 dev_err(hba->dev, "Memory allocation failed\n");
10563 /* Configure LRB */
10564 ufshcd_host_memory_configure(hba);
10566 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
10567 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
10568 host->max_id = UFSHCD_MAX_ID;
10569 host->max_lun = UFS_MAX_LUNS;
10570 host->max_channel = UFSHCD_MAX_CHANNEL;
10571 host->unique_id = host->host_no;
10572 host->max_cmd_len = UFS_CDB_SIZE;
10573 host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
10575 /* Use default RPM delay if host not set */
10576 if (host->rpm_autosuspend_delay == 0)
10577 host->rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS;
10579 hba->max_pwr_info.is_valid = false;
10581 /* Initialize work queues */
10582 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
10583 hba->host->host_no);
10584 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
10586 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
10591 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10592 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10594 sema_init(&hba->host_sem, 1);
10596 /* Initialize UIC command mutex */
10597 mutex_init(&hba->uic_cmd_mutex);
10599 /* Initialize mutex for device management commands */
10600 mutex_init(&hba->dev_cmd.lock);
10602 /* Initialize mutex for exception event control */
10603 mutex_init(&hba->ee_ctrl_mutex);
10605 mutex_init(&hba->wb_mutex);
10606 init_rwsem(&hba->clk_scaling_lock);
10608 ufshcd_init_clk_gating(hba);
10610 ufshcd_init_clk_scaling(hba);
10613 * In order to avoid any spurious interrupt immediately after
10614 * registering UFS controller interrupt handler, clear any pending UFS
10615 * interrupt status and disable all the UFS interrupts.
10617 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10618 REG_INTERRUPT_STATUS);
10619 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10621 * Make sure that UFS interrupts are disabled and any pending interrupt
10622 * status is cleared before registering UFS interrupt handler.
10626 /* IRQ registration */
10627 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10629 dev_err(hba->dev, "request irq failed\n");
10632 hba->is_irq_enabled = true;
10635 if (!is_mcq_supported(hba)) {
10636 err = scsi_add_host(host, hba->dev);
10638 dev_err(hba->dev, "scsi_add_host failed\n");
10643 hba->tmf_tag_set = (struct blk_mq_tag_set) {
10645 .queue_depth = hba->nutmrs,
10646 .ops = &ufshcd_tmf_ops,
10647 .flags = BLK_MQ_F_NO_SCHED,
10649 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10651 goto out_remove_scsi_host;
10652 hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
10653 if (IS_ERR(hba->tmf_queue)) {
10654 err = PTR_ERR(hba->tmf_queue);
10655 goto free_tmf_tag_set;
10657 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10658 sizeof(*hba->tmf_rqs), GFP_KERNEL);
10659 if (!hba->tmf_rqs) {
10661 goto free_tmf_queue;
10664 /* Reset the attached device */
10665 ufshcd_device_reset(hba);
10667 ufshcd_init_crypto(hba);
10669 /* Host controller enable */
10670 err = ufshcd_hba_enable(hba);
10672 dev_err(hba->dev, "Host controller enable failed\n");
10673 ufshcd_print_evt_hist(hba);
10674 ufshcd_print_host_state(hba);
10675 goto free_tmf_queue;
10679 * Set the default power management level for runtime and system PM.
10680 * Default power saving mode is to keep UFS link in Hibern8 state
10681 * and UFS device in sleep state.
10683 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10684 UFS_SLEEP_PWR_MODE,
10685 UIC_LINK_HIBERN8_STATE);
10686 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10687 UFS_SLEEP_PWR_MODE,
10688 UIC_LINK_HIBERN8_STATE);
10690 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
10691 INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
10693 /* Set the default auto-hibernate idle timer value to 150 ms */
10694 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10695 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10696 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
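	/*
	 * Worked example (assuming the UFSHCI scale ladder of 1 us, 10 us,
	 * 100 us, 1 ms, 10 ms, 100 ms for scale codes 0-5): SCALE = 3
	 * selects a 1 ms granularity, so TIMER = 150 gives 150 * 1 ms,
	 * i.e. the 150 ms named in the comment above.
	 */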
10699 /* Hold auto suspend until async scan completes */
10700 pm_runtime_get_sync(dev);
10701 atomic_set(&hba->scsi_block_reqs_cnt, 0);
10703 * We assume that the device wasn't put into the sleep/power-down
10704 * state during the boot stage before the kernel started.
10705 * This assumption helps avoid doing link startup twice during
10706 * ufshcd_probe_hba().
10708 ufshcd_set_ufs_dev_active(hba);
10710 async_schedule(ufshcd_async_scan, hba);
10711 ufs_sysfs_add_nodes(hba->dev);
10713 device_enable_async_suspend(dev);
10714 ufshcd_pm_qos_init(hba);
10718 blk_mq_destroy_queue(hba->tmf_queue);
10719 blk_put_queue(hba->tmf_queue);
10721 blk_mq_free_tag_set(&hba->tmf_tag_set);
10722 out_remove_scsi_host:
10723 scsi_remove_host(hba->host);
10725 hba->is_irq_enabled = false;
10726 ufshcd_hba_exit(hba);
10730 EXPORT_SYMBOL_GPL(ufshcd_init);
10732 void ufshcd_resume_complete(struct device *dev)
10734 struct ufs_hba *hba = dev_get_drvdata(dev);
10736 if (hba->complete_put) {
10737 ufshcd_rpm_put(hba);
10738 hba->complete_put = false;
10741 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
10743 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
10745 struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
10746 enum ufs_dev_pwr_mode dev_pwr_mode;
10747 enum uic_link_state link_state;
10748 unsigned long flags;
10751 spin_lock_irqsave(&dev->power.lock, flags);
10752 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
10753 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
10754 res = pm_runtime_suspended(dev) &&
10755 hba->curr_dev_pwr_mode == dev_pwr_mode &&
10756 hba->uic_link_state == link_state &&
10757 !hba->dev_info.b_rpm_dev_flush_capable;
10758 spin_unlock_irqrestore(&dev->power.lock, flags);
10763 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
10765 struct ufs_hba *hba = dev_get_drvdata(dev);
10769 * SCSI assumes that runtime-pm and system-pm for scsi drivers
10770 * are the same, and it doesn't wake up the device for system-suspend
10771 * if it's runtime suspended. But UFS doesn't follow that;
10772 * refer to ufshcd_resume_complete().
10774 if (hba->ufs_device_wlun) {
10775 /* Prevent runtime suspend */
10776 ufshcd_rpm_get_noresume(hba);
10778 * Check if already runtime suspended in same state as system
10779 * suspend would be.
10781 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
10782 /* RPM state is not ok for SPM, so runtime resume */
10783 ret = ufshcd_rpm_resume(hba);
10784 if (ret < 0 && ret != -EACCES) {
10785 ufshcd_rpm_put(hba);
10789 hba->complete_put = true;
10793 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
10795 int ufshcd_suspend_prepare(struct device *dev)
10797 return __ufshcd_suspend_prepare(dev, true);
10799 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
10801 #ifdef CONFIG_PM_SLEEP
10802 static int ufshcd_wl_poweroff(struct device *dev)
10804 struct scsi_device *sdev = to_scsi_device(dev);
10805 struct ufs_hba *hba = shost_priv(sdev->host);
10807 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10812 static int ufshcd_wl_probe(struct device *dev)
10814 struct scsi_device *sdev = to_scsi_device(dev);
10816 if (!is_device_wlun(sdev))
10819 blk_pm_runtime_init(sdev->request_queue, dev);
10820 pm_runtime_set_autosuspend_delay(dev, 0);
10821 pm_runtime_allow(dev);
10826 static int ufshcd_wl_remove(struct device *dev)
10828 pm_runtime_forbid(dev);
10832 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
10833 #ifdef CONFIG_PM_SLEEP
10834 .suspend = ufshcd_wl_suspend,
10835 .resume = ufshcd_wl_resume,
10836 .freeze = ufshcd_wl_suspend,
10837 .thaw = ufshcd_wl_resume,
10838 .poweroff = ufshcd_wl_poweroff,
10839 .restore = ufshcd_wl_resume,
10841 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
10844 static void ufshcd_check_header_layout(void)
10847 * gcc compilers before version 10 cannot do constant-folding for
10848 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and older.
10851 if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
10854 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10855 .cci = 3})[0] != 3);
10857 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10858 .ehs_length = 2})[1] != 2);
10860 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10861 .enable_crypto = 1})[2]
10864 BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
10866 .data_direction = 3,
10868 })[3]) != ((5 << 4) | (3 << 1) | 1));
10870 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10871 .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
10872 cpu_to_le32(0xdeadbeef));
10874 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10875 .ocs = 4})[8] != 4);
10877 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10878 .cds = 5})[9] != 5);
10880 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10881 .dunu = cpu_to_le32(0xbadcafe)})[3] !=
10882 cpu_to_le32(0xbadcafe));
10884 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10885 .iid = 0xf })[4] != 0xf0);
10887 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10888 .command_set_type = 0xf })[4] != 0xf);
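/*
 * The checks above work because a compound literal with a single bitfield
 * set is constant-foldable; for example,
 *
 *	((u8 *)&(struct request_desc_header){ .ocs = 4 })[8] != 4
 *
 * is evaluated at compile time, and BUILD_BUG_ON() fires if the ocs field
 * does not land in byte 8 of the descriptor with the expected layout.
 */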
10892 * ufs_dev_wlun_template - describes ufs device wlun
10893 * ufs-device wlun - used to send pm commands
10894 * All luns are consumers of ufs-device wlun.
10896 * Currently, no sd driver is present for wluns.
10897 * Hence no specific pm operations are performed.
10898 * With ufs design, SSU should be sent to ufs-device wlun.
10899 * Hence register a scsi driver for ufs wluns only.
10901 static struct scsi_driver ufs_dev_wlun_template = {
10903 .name = "ufs_device_wlun",
10904 .owner = THIS_MODULE,
10905 .probe = ufshcd_wl_probe,
10906 .remove = ufshcd_wl_remove,
10907 .pm = &ufshcd_wl_pm_ops,
10908 .shutdown = ufshcd_wl_shutdown,
10912 static int __init ufshcd_core_init(void)
10916 ufshcd_check_header_layout();
10918 ufs_debugfs_init();
10920 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10922 ufs_debugfs_exit();
10926 static void __exit ufshcd_core_exit(void)
10928 ufs_debugfs_exit();
10929 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10932 module_init(ufshcd_core_init);
10933 module_exit(ufshcd_core_exit);
10935 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10936 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10937 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10938 MODULE_SOFTDEP("pre: governor_simpleondemand");
10939 MODULE_LICENSE("GPL");