/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>

#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
/*
 * Query request timeout for fDeviceInit flag
 * On some devices the fDeviceInit query response time is so large that the
 * default QUERY_REQ_TIMEOUT may not be enough for such devices.
 */
#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
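
/*
 * Note: the aggregation timeout field counts in units of 40 us, so the
 * default of 0x02 above corresponds to 2 * 40 us = 80 us before a pending
 * aggregated interrupt is raised (see ufshcd_config_intr_aggr() below).
 */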
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};
#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
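
/*
 * For illustration: the shallowest level in this table maps to an active
 * device with an active link, while the deepest maps to a powered-off
 * device with the link off. The two helpers below simply index the table
 * by the requested ufs_pm_level.
 */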
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		else
			hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
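
/*
 * A typical call, as a sketch (compare ufshcd_clear_cmd() below): poll the
 * transfer doorbell until a given tag's bit reads back as zero, sleeping
 * between reads,
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 1000, 1000, true);
 *
 * which checks every 1000 us for up to 1000 ms.
 */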
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else returns 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}
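
/*
 * For example, a ready controller reports UTRLRDY | UTMRLRDY | UCRDY
 * (bits 1-3 set, i.e. 0x0E): (0x0E >> 1) ^ 0x07 == 0x07 ^ 0x07 == 0,
 * i.e. success; any missing ready bit leaves a non-zero result.
 */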
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, the host controller is
 * informed that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if the local unipro
	 * version doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			if (async) {
				rc = -EAGAIN;
				hba->clk_gating.active_reqs--;
				break;
			}
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				goto out;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
				__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
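
/*
 * Usage sketch (not taken verbatim from this file): callers bracket
 * controller access with a hold/release pair so the clocks cannot be
 * gated underneath them,
 *
 *	ufshcd_hold(hba, false);
 *	... access the host controller ...
 *	ufshcd_release(hba);
 *
 * Passing async = true makes ufshcd_hold() return -EAGAIN instead of
 * sleeping while an ungate is still in flight (see ufshcd_queuecommand()).
 */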
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * If you are here to cancel this work, the gating state would be
	 * marked as REQ_CLKS_ON. In this case keep the state as REQ_CLKS_ON,
	 * which anyway implies that clocks are off and a request to turn
	 * them on is pending. This keeps the state machine intact and
	 * ultimately prevents the cancel work from running multiple times
	 * when new requests arrive before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}
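
/*
 * Clock-gating state machine, for reference (derived from the code above):
 *
 *   CLKS_ON --(last release, then delay_ms idle)--> REQ_CLKS_OFF --> CLKS_OFF
 *   CLKS_OFF / REQ_CLKS_OFF --(ufshcd_hold)--> REQ_CLKS_ON --> CLKS_ON
 *
 * ufshcd_gate_work() drives the OFF path and ufshcd_ungate_work() the
 * ON path.
 */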
void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
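
/*
 * UPMCRS occupies bits 10:8 of HCS, hence the >> 8 and & 0x7 above.
 * For example, a raw HCS value of 0x100 reads back as 1, i.e. PWR_LOCAL
 * (assuming the standard UPMCRS encoding from ufshci.h, where 0 is PWR_OK).
 */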
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_command: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with the mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16)(sg_segments *
					sizeof(struct ufshcd_sg_entry)));
		else
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size  =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
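
/*
 * Note on the VER_10 path above: the RW bits of the enable register are
 * preserved and the requested bits are OR'd in; ((set ^ intrs) & intrs)
 * is simply the requested bits that are not already set, so the net
 * effect is "rw | intrs" without disturbing the other bits.
 */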
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}
/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
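
/*
 * Worked example (assuming UFS_UPIU_WLUN_ID is bit 7 and SCSI_W_LUN_BASE
 * is 0xc100, as defined in the UFS and SCSI headers): UPIU W-LUN 0x81
 * maps to (0x81 & ~0x80) | 0xc100 == 0xc101 on the SCSI side.
 */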
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @cmd: command from SCSI Midlayer
 * @done: call back function
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED:
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}

	/* if error handling is in progress, don't issue commands */
	if (ufshcd_eh_in_progress(hba)) {
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find a different tag instead of waiting for dev manage
		 * command completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		ufshcd_release(hba);
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_comp_devman_upiu(hba, lrbp);
}
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000, true);

	return err;
}
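
/*
 * Note: passing val = ~mask works because ufshcd_wait_for_register()
 * masks the wait condition internally (val &= mask), so ~mask & mask == 0
 * and this polls until the tag's doorbell bit reads back as 0, i.e. until
 * the transfer slot has been freed by the hardware.
 */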
static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
		 * in case of an error, after clearing the doorbell,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
	}

	return err;
}
/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * returns true with tag value in @tag.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * hba: per-adapter instance
 * query_opcode: flag query to perform
 * idn: flag idn to access
 * flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * hba: per-adapter instance
 * opcode: attribute opcode
 * idn: attribute idn to access
 * index: index field
 * selector: selector field
 * attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor
 * requests
 * hba: per-adapter instance
 * opcode: attribute opcode
 * idn: attribute idn to access
 * index: index field
 * selector: selector field
 * desc_buf: the buffer that contains the descriptor
 * buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @desc_length: pointer to variable to read the length of descriptor
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba,
	enum desc_idn desc_id,
	int desc_index,
	int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0, header,
					&header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			__func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_len: mapped desc length (out)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
	enum desc_idn desc_id, int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
2150 * ufshcd_read_desc_param - read the specified descriptor parameter
2151 * @hba: Pointer to adapter instance
2152 * @desc_id: descriptor idn value
2153 * @desc_index: descriptor index
2154 * @param_offset: offset of the parameter to read
2155 * @param_read_buf: pointer to buffer where parameter would be read
2156 * @param_size: sizeof(param_read_buf)
2158 * Return 0 in case of success, non-zero otherwise
2160 static int ufshcd_read_desc_param(struct ufs_hba *hba,
2161 enum desc_idn desc_id,
2170 bool is_kmalloc = true;
2173 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
2176 /* Get the max length of descriptor from structure filled up at probe
2179 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
2182 if (ret || !buff_len) {
2183 dev_err(hba->dev, "%s: Failed to get full descriptor length",
2188 /* Check whether we need temp memory */
2189 if (param_offset != 0 || param_size < buff_len) {
2190 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2194 desc_buf = param_read_buf;
2198 /* Request for full descriptor */
2199 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2200 desc_id, desc_index, 0,
2201 desc_buf, &buff_len);
2204 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2205 __func__, desc_id, desc_index, param_offset, ret);
2210 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
2211 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
2212 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
2217 /* Check whether we will not copy more data than available */
2218 if (is_kmalloc && param_size > buff_len)
2219 param_size = buff_len;
2222 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2229 static inline int ufshcd_read_desc(struct ufs_hba *hba,
2230 enum desc_idn desc_id,
2235 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2238 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2242 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2245 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2247 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2249 EXPORT_SYMBOL(ufshcd_read_device_desc);
2252 * ufshcd_read_string_desc - read string descriptor
2253 * @hba: pointer to adapter instance
2254 * @desc_index: descriptor index
2255 * @buf: pointer to buffer where descriptor would be read
2256 * @size: size of buf
2257 * @ascii: if true convert from unicode to ascii characters
2259 * Return 0 in case of success, non-zero otherwise
2261 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2262 u32 size, bool ascii)
2266 err = ufshcd_read_desc(hba,
2267 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2270 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2271 __func__, QUERY_REQ_RETRIES, err);
2282 /* remove header and divide by 2 to move from UTF16 to UTF8 */
2283 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2284 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2285 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2291 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2298 * The descriptor contains the string in UTF-16 format;
2299 * convert it to UTF-8 so it can be displayed.
2301 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2302 desc_len - QUERY_DESC_HDR_SIZE,
2303 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2305 /* replace non-printable or non-ASCII characters with spaces */
2306 for (i = 0; i < ascii_len; i++)
2307 ufshcd_remove_non_printable(&buff_ascii[i]);
2309 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2310 size - QUERY_DESC_HDR_SIZE);
2311 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2312 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
2318 EXPORT_SYMBOL(ufshcd_read_string_desc);
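/*
 * Illustrative sketch: reading a string descriptor (e.g. the product name)
 * with ASCII conversion. In practice the descriptor index comes from the
 * device descriptor; both the index parameter and the function name here
 * are hypothetical.
 */
static int ufshcd_example_read_product_name(struct ufs_hba *hba, int index)
{
	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};

	return ufshcd_read_string_desc(hba, index, str_desc_buf,
				       QUERY_DESC_MAX_SIZE, true);
}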
2321 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2322 * @hba: Pointer to adapter instance
2324 * @param_offset: offset of the parameter to read
2325 * @param_read_buf: pointer to buffer where parameter would be read
2326 * @param_size: sizeof(param_read_buf)
2328 * Return 0 in case of success, non-zero otherwise
2330 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2332 enum unit_desc_param param_offset,
2337 * Unit descriptors are only available for general purpose LUs (LUN id
2338 * from 0 to 7) and RPMB Well known LU.
2340 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
2343 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2344 param_offset, param_read_buf, param_size);
2348 * ufshcd_memory_alloc - allocate memory for host memory space data structures
2349 * @hba: per adapter instance
2351 * 1. Allocate DMA memory for Command Descriptor array
2352 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
2353 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2354 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2356 * 4. Allocate memory for local reference block (lrb).
2358 * Returns 0 for success, non-zero in case of failure
2360 static int ufshcd_memory_alloc(struct ufs_hba *hba)
2362 size_t utmrdl_size, utrdl_size, ucdl_size;
2364 /* Allocate memory for UTP command descriptors */
2365 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2366 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2368 &hba->ucdl_dma_addr,
2372 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
2373 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
2374 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
2375 * be aligned to 128 bytes as well
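/*
 * Worked example (illustrative): with 4 KiB pages, a PAGE_SIZE-aligned
 * address has its low 12 bits clear, so its low 7 bits are clear too,
 * i.e. 128-byte alignment follows automatically.
 */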
2377 if (!hba->ucdl_base_addr ||
2378 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
2380 "Command Descriptor Memory allocation failed\n");
2385 * Allocate memory for UTP Transfer descriptors
2386 * UFSHCI requires 1024 byte alignment of UTRD
2388 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2389 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2391 &hba->utrdl_dma_addr,
2393 if (!hba->utrdl_base_addr ||
2394 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
2396 "Transfer Descriptor Memory allocation failed\n");
2401 * Allocate memory for UTP Task Management descriptors
2402 * UFSHCI requires 1024 byte alignment of UTMRD
2404 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2405 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2407 &hba->utmrdl_dma_addr,
2409 if (!hba->utmrdl_base_addr ||
2410 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
2412 "Task Management Descriptor Memory allocation failed\n");
2416 /* Allocate memory for local reference block */
2417 hba->lrb = devm_kzalloc(hba->dev,
2418 hba->nutrs * sizeof(struct ufshcd_lrb),
2421 dev_err(hba->dev, "LRB Memory allocation failed\n");
2430 * ufshcd_host_memory_configure - configure local reference block with memory offsets
2432 * @hba: per adapter instance
2434 * Configure Host memory space
2435 * 1. Update corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA address.
2437 * 2. Update each UTRD with Response UPIU offset, Response UPIU length and PRDT offset.
2439 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2440 * into local reference block.
2442 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2444 struct utp_transfer_cmd_desc *cmd_descp;
2445 struct utp_transfer_req_desc *utrdlp;
2446 dma_addr_t cmd_desc_dma_addr;
2447 dma_addr_t cmd_desc_element_addr;
2448 u16 response_offset;
2453 utrdlp = hba->utrdl_base_addr;
2454 cmd_descp = hba->ucdl_base_addr;
2457 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2459 offsetof(struct utp_transfer_cmd_desc, prd_table);
2461 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2462 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2464 for (i = 0; i < hba->nutrs; i++) {
2465 /* Configure UTRD with command descriptor base address */
2466 cmd_desc_element_addr =
2467 (cmd_desc_dma_addr + (cmd_desc_size * i));
2468 utrdlp[i].command_desc_base_addr_lo =
2469 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2470 utrdlp[i].command_desc_base_addr_hi =
2471 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2473 /* Response upiu and prdt offset should be in double words */
2474 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2475 utrdlp[i].response_upiu_offset =
2476 cpu_to_le16(response_offset);
2477 utrdlp[i].prd_table_offset =
2478 cpu_to_le16(prdt_offset);
2479 utrdlp[i].response_upiu_length =
2480 cpu_to_le16(ALIGNED_UPIU_SIZE);
2482 utrdlp[i].response_upiu_offset =
2483 cpu_to_le16((response_offset >> 2));
2484 utrdlp[i].prd_table_offset =
2485 cpu_to_le16((prdt_offset >> 2));
2486 utrdlp[i].response_upiu_length =
2487 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2490 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2491 hba->lrb[i].ucd_req_ptr =
2492 (struct utp_upiu_req *)(cmd_descp + i);
2493 hba->lrb[i].ucd_rsp_ptr =
2494 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2495 hba->lrb[i].ucd_prdt_ptr =
2496 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2501 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2502 * @hba: per adapter instance
2504 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2505 * in order to initialize the Unipro link startup procedure.
2506 * Once the Unipro links are up, the device connected to the controller is detected.
2509 * Returns 0 on success, non-zero value on failure
2511 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2513 struct uic_command uic_cmd = {0};
2516 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2518 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2521 "dme-link-startup: error code %d\n", ret);
2525 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2527 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2528 unsigned long min_sleep_time_us;
2530 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2534 * last_dme_cmd_tstamp will be 0 only for the 1st call to this function.
2537 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2538 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2540 unsigned long delta =
2541 (unsigned long) ktime_to_us(
2542 ktime_sub(ktime_get(),
2543 hba->last_dme_cmd_tstamp));
2545 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2547 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2549 return; /* no more delay required */
2552 /* allow sleep for extra 50us if needed */
2553 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
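/*
 * Worked example (illustrative): if the previous DME command completed
 * 600 us ago, delta = 600 < 1000, so min_sleep_time_us = 400 and the
 * usleep_range() above sleeps roughly 400-450 us before the next command.
 */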
2557 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2558 * @hba: per adapter instance
2559 * @attr_sel: uic command argument1
2560 * @attr_set: attribute set type as uic command argument2
2561 * @mib_val: setting value as uic command argument3
2562 * @peer: indicate whether peer or local
2564 * Returns 0 on success, non-zero value on failure
2566 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2567 u8 attr_set, u32 mib_val, u8 peer)
2569 struct uic_command uic_cmd = {0};
2570 static const char *const action[] = {
2574 const char *set = action[!!peer];
2576 int retries = UFS_UIC_COMMAND_RETRIES;
2578 uic_cmd.command = peer ?
2579 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2580 uic_cmd.argument1 = attr_sel;
2581 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2582 uic_cmd.argument3 = mib_val;
2585 /* for peer attributes we retry upon failure */
2586 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2588 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2589 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2590 } while (ret && peer && --retries);
2593 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
2594 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2599 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
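/*
 * Illustrative sketch: setting a local PA-layer attribute through the
 * exported helper above. The ufshcd_dme_set()/ufshcd_dme_peer_set()
 * wrappers used elsewhere in this file expand to calls of this shape;
 * ATTR_SET_NOR and DME_LOCAL are assumed from ufshcd.h, and the function
 * name is hypothetical.
 */
static int ufshcd_example_set_tx_gear(struct ufs_hba *hba, u32 gear)
{
	return ufshcd_dme_set_attr(hba, UIC_ARG_MIB(PA_TXGEAR),
				   ATTR_SET_NOR, gear, DME_LOCAL);
}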
2602 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2603 * @hba: per adapter instance
2604 * @attr_sel: uic command argument1
2605 * @mib_val: the value of the attribute as returned by the UIC command
2606 * @peer: indicate whether peer or local
2608 * Returns 0 on success, non-zero value on failure
2610 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2611 u32 *mib_val, u8 peer)
2613 struct uic_command uic_cmd = {0};
2614 static const char *const action[] = {
2618 const char *get = action[!!peer];
2620 int retries = UFS_UIC_COMMAND_RETRIES;
2621 struct ufs_pa_layer_attr orig_pwr_info;
2622 struct ufs_pa_layer_attr temp_pwr_info;
2623 bool pwr_mode_change = false;
2625 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2626 orig_pwr_info = hba->pwr_info;
2627 temp_pwr_info = orig_pwr_info;
2629 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2630 orig_pwr_info.pwr_rx == FAST_MODE) {
2631 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2632 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2633 pwr_mode_change = true;
2634 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2635 orig_pwr_info.pwr_rx == SLOW_MODE) {
2636 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2637 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2638 pwr_mode_change = true;
2640 if (pwr_mode_change) {
2641 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2647 uic_cmd.command = peer ?
2648 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2649 uic_cmd.argument1 = attr_sel;
2652 /* for peer attributes we retry upon failure */
2653 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2655 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2656 get, UIC_GET_ATTR_ID(attr_sel), ret);
2657 } while (ret && peer && --retries);
2660 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
2661 get, UIC_GET_ATTR_ID(attr_sel), retries);
2663 if (mib_val && !ret)
2664 *mib_val = uic_cmd.argument3;
2666 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2668 ufshcd_change_power_mode(hba, &orig_pwr_info);
2672 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
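/*
 * Illustrative sketch: reading the attribute back, mirroring the
 * ufshcd_dme_get() wrapper used throughout this file. DME_LOCAL is
 * assumed from ufshcd.h; the function name is hypothetical.
 */
static int ufshcd_example_get_rx_gear(struct ufs_hba *hba, u32 *gear)
{
	return ufshcd_dme_get_attr(hba, UIC_ARG_MIB(PA_RXGEAR), gear,
				   DME_LOCAL);
}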
2675 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
2676 * state) and waits for it to take effect.
2678 * @hba: per adapter instance
2679 * @cmd: UIC command to execute
2681 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2682 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2683 * and device UniPro link, hence their final completion is indicated by
2684 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2685 * addition to normal UIC command completion Status (UCCS). This function only
2686 * returns after the relevant status bits indicate the completion.
2688 * Returns 0 on success, non-zero value on failure
2690 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2692 struct completion uic_async_done;
2693 unsigned long flags;
2696 bool reenable_intr = false;
2698 mutex_lock(&hba->uic_cmd_mutex);
2699 init_completion(&uic_async_done);
2700 ufshcd_add_delay_before_dme_cmd(hba);
2702 spin_lock_irqsave(hba->host->host_lock, flags);
2703 hba->uic_async_done = &uic_async_done;
2704 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2705 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
2707 * Make sure UIC command completion interrupt is disabled before
2708 * issuing UIC command.
2711 reenable_intr = true;
2713 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2714 spin_unlock_irqrestore(hba->host->host_lock, flags);
2717 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2718 cmd->command, cmd->argument3, ret);
2722 if (!wait_for_completion_timeout(hba->uic_async_done,
2723 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2725 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2726 cmd->command, cmd->argument3);
2731 status = ufshcd_get_upmcrs(hba);
2732 if (status != PWR_LOCAL) {
2734 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
2735 cmd->command, status);
2736 ret = (status != PWR_OK) ? status : -1;
2739 spin_lock_irqsave(hba->host->host_lock, flags);
2740 hba->active_uic_cmd = NULL;
2741 hba->uic_async_done = NULL;
2743 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
2744 spin_unlock_irqrestore(hba->host->host_lock, flags);
2745 mutex_unlock(&hba->uic_cmd_mutex);
2751 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2752 * using DME_SET primitives.
2753 * @hba: per adapter instance
2754 * @mode: power mode value
2756 * Returns 0 on success, non-zero value on failure
2758 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2760 struct uic_command uic_cmd = {0};
2763 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2764 ret = ufshcd_dme_set(hba,
2765 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2767 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2773 uic_cmd.command = UIC_CMD_DME_SET;
2774 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2775 uic_cmd.argument3 = mode;
2776 ufshcd_hold(hba, false);
2777 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2778 ufshcd_release(hba);
2784 static int ufshcd_link_recovery(struct ufs_hba *hba)
2787 unsigned long flags;
2789 spin_lock_irqsave(hba->host->host_lock, flags);
2790 hba->ufshcd_state = UFSHCD_STATE_RESET;
2791 ufshcd_set_eh_in_progress(hba);
2792 spin_unlock_irqrestore(hba->host->host_lock, flags);
2794 ret = ufshcd_host_reset_and_restore(hba);
2796 spin_lock_irqsave(hba->host->host_lock, flags);
2798 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2799 ufshcd_clear_eh_in_progress(hba);
2800 spin_unlock_irqrestore(hba->host->host_lock, flags);
2803 dev_err(hba->dev, "%s: link recovery failed, err %d",
2809 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2812 struct uic_command uic_cmd = {0};
2814 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2815 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2818 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
2822 * If link recovery fails then return error so that the caller
2823 * doesn't retry hibern8 enter again.
2825 if (ufshcd_link_recovery(hba))
2832 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2834 int ret = 0, retries;
2836 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
2837 ret = __ufshcd_uic_hibern8_enter(hba);
2838 if (!ret || ret == -ENOLINK)
2845 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2847 struct uic_command uic_cmd = {0};
2850 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2851 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2853 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2855 ret = ufshcd_link_recovery(hba);
2862 * ufshcd_init_pwr_info - setting the POR (power on reset)
2863 * values in hba power info
2864 * @hba: per-adapter instance
2866 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2868 hba->pwr_info.gear_rx = UFS_PWM_G1;
2869 hba->pwr_info.gear_tx = UFS_PWM_G1;
2870 hba->pwr_info.lane_rx = 1;
2871 hba->pwr_info.lane_tx = 1;
2872 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2873 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2874 hba->pwr_info.hs_rate = 0;
2878 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2879 * @hba: per-adapter instance
2881 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2883 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2885 if (hba->max_pwr_info.is_valid)
2888 pwr_info->pwr_tx = FASTAUTO_MODE;
2889 pwr_info->pwr_rx = FASTAUTO_MODE;
2890 pwr_info->hs_rate = PA_HS_MODE_B;
2892 /* Get the connected lane count */
2893 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2894 &pwr_info->lane_rx);
2895 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2896 &pwr_info->lane_tx);
2898 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2899 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2907 * First, read the maximum HS gear. A zero value means there is
2908 * no HSGEAR capability, in which case read the maximum PWM gear
2909 * instead.
2911 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2912 if (!pwr_info->gear_rx) {
2913 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2914 &pwr_info->gear_rx);
2915 if (!pwr_info->gear_rx) {
2916 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2917 __func__, pwr_info->gear_rx);
2920 pwr_info->pwr_rx = SLOWAUTO_MODE;
2923 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2924 &pwr_info->gear_tx);
2925 if (!pwr_info->gear_tx) {
2926 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2927 &pwr_info->gear_tx);
2928 if (!pwr_info->gear_tx) {
2929 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2930 __func__, pwr_info->gear_tx);
2933 pwr_info->pwr_tx = SLOWAUTO_MODE;
2936 hba->max_pwr_info.is_valid = true;
2940 static int ufshcd_change_power_mode(struct ufs_hba *hba,
2941 struct ufs_pa_layer_attr *pwr_mode)
2945 /* if already configured to the requested pwr_mode */
2946 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2947 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2948 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2949 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2950 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2951 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2952 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2953 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2958 * Configure attributes for power mode change with below.
2959 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2960 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2963 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2964 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2966 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2967 pwr_mode->pwr_rx == FAST_MODE)
2968 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2970 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2972 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2973 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2975 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2976 pwr_mode->pwr_tx == FAST_MODE)
2977 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2979 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2981 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2982 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2983 pwr_mode->pwr_rx == FAST_MODE ||
2984 pwr_mode->pwr_tx == FAST_MODE)
2985 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2988 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2989 | pwr_mode->pwr_tx);
2993 "%s: power mode change failed %d\n", __func__, ret);
2995 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
2998 memcpy(&hba->pwr_info, pwr_mode,
2999 sizeof(struct ufs_pa_layer_attr));
3006 * ufshcd_config_pwr_mode - configure a new power mode
3007 * @hba: per-adapter instance
3008 * @desired_pwr_mode: desired power configuration
3010 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3011 struct ufs_pa_layer_attr *desired_pwr_mode)
3013 struct ufs_pa_layer_attr final_params = { 0 };
3016 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3017 desired_pwr_mode, &final_params);
3020 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3022 ret = ufshcd_change_power_mode(hba, &final_params);
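/*
 * Illustrative sketch: the probe path would typically negotiate the
 * fastest supported mode by combining ufshcd_get_max_pwr_mode() with
 * ufshcd_config_pwr_mode(). Hypothetical wrapper for demonstration.
 */
static int ufshcd_example_scale_to_max_gear(struct ufs_hba *hba)
{
	int ret = ufshcd_get_max_pwr_mode(hba);

	if (ret)
		return ret;

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}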
3028 * ufshcd_complete_dev_init() - checks device readiness
3029 * @hba: per-adapter instance
3031 * Set fDeviceInit flag and poll until device toggles it.
3033 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3039 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3040 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
3043 "%s setting fDeviceInit flag failed with error %d\n",
3048 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
3049 for (i = 0; i < 1000 && !err && flag_res; i++)
3050 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3051 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3055 "%s reading fDeviceInit flag failed with error %d\n",
3059 "%s fDeviceInit was not cleared by the device\n",
3067 * ufshcd_make_hba_operational - Make UFS controller operational
3068 * @hba: per adapter instance
3070 * To bring UFS host controller to operational state,
3071 * 1. Enable required interrupts
3072 * 2. Configure interrupt aggregation
3073 * 3. Program UTRL and UTMRL base address
3074 * 4. Configure run-stop-registers
3076 * Returns 0 on success, non-zero value on failure
3078 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3083 /* Enable required interrupts */
3084 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3086 /* Configure interrupt aggregation */
3087 if (ufshcd_is_intr_aggr_allowed(hba))
3088 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
3090 ufshcd_disable_intr_aggr(hba);
3092 /* Configure UTRL and UTMRL base address registers */
3093 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
3094 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
3095 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
3096 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
3097 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
3098 REG_UTP_TASK_REQ_LIST_BASE_L);
3099 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
3100 REG_UTP_TASK_REQ_LIST_BASE_H);
3103 * Make sure base address and interrupt setup are updated before
3104 * enabling the run/stop registers below.
3109 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
3111 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
3112 if (!(ufshcd_get_lists_status(reg))) {
3113 ufshcd_enable_run_stop_reg(hba);
3116 "Host controller not ready to process requests");
3126 * ufshcd_hba_stop - Send controller to reset state
3127 * @hba: per adapter instance
3128 * @can_sleep: perform sleep or just spin
3130 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
3134 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
3135 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
3136 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
3139 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
3143 * ufshcd_hba_enable - initialize the controller
3144 * @hba: per adapter instance
3146 * The controller resets itself and controller firmware initialization
3147 * sequence kicks off. When controller is ready it will set
3148 * the Host Controller Enable bit to 1.
3150 * Returns 0 on success, non-zero value on failure
3152 static int ufshcd_hba_enable(struct ufs_hba *hba)
3157 * msleep of 1 and 5 used in this function might result in msleep(20),
3158 * but it was necessary to send the UFS FPGA to reset mode during
3159 * development and testing of this driver. msleep can be changed to
3160 * mdelay and retry count can be reduced based on the controller.
3162 if (!ufshcd_is_hba_active(hba))
3163 /* change controller state to "reset state" */
3164 ufshcd_hba_stop(hba, true);
3166 /* UniPro link is disabled at this point */
3167 ufshcd_set_link_off(hba);
3169 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
3171 /* start controller initialization sequence */
3172 ufshcd_hba_start(hba);
3175 * To initialize a UFS host controller HCE bit must be set to 1.
3176 * During initialization the HCE bit value changes from 1->0->1.
3177 * When the host controller completes initialization sequence
3178 * it sets the value of HCE bit to 1. The same HCE bit is read back
3179 * to check if the controller has completed initialization sequence.
3180 * So without this delay, the HCE = 1 value set by the previous
3181 * instruction might be read back before the controller clears it.
3182 * This delay can be changed based on the controller.
3186 /* wait for the host controller to complete initialization */
3188 while (ufshcd_is_hba_active(hba)) {
3193 "Controller enable failed\n");
3199 /* enable UIC related interrupts */
3200 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
3202 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
3207 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
3209 int tx_lanes, i, err = 0;
3212 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3215 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3217 for (i = 0; i < tx_lanes; i++) {
3219 err = ufshcd_dme_set(hba,
3220 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3221 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3224 err = ufshcd_dme_peer_set(hba,
3225 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3226 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3229 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
3230 __func__, peer, i, err);
3238 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
3240 return ufshcd_disable_tx_lcc(hba, true);
3244 * ufshcd_link_startup - Initialize unipro link startup
3245 * @hba: per adapter instance
3247 * Returns 0 for success, non-zero in case of failure
3249 static int ufshcd_link_startup(struct ufs_hba *hba)
3252 int retries = DME_LINKSTARTUP_RETRIES;
3253 bool link_startup_again = false;
3256 * If UFS device isn't active then we will have to issue link startup
3257 * 2 times to make sure the device state moves to active.
3259 if (!ufshcd_is_ufs_dev_active(hba))
3260 link_startup_again = true;
3264 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
3266 ret = ufshcd_dme_link_startup(hba);
3268 /* check if device is detected by inter-connect layer */
3269 if (!ret && !ufshcd_is_device_present(hba)) {
3270 dev_err(hba->dev, "%s: Device not present\n", __func__);
3276 * DME link lost indication is only received when link is up,
3277 * but we can't be sure if the link is up until link startup
3278 * succeeds. So reset the local Uni-Pro and try again.
3280 if (ret && ufshcd_hba_enable(hba))
3282 } while (ret && retries--);
3285 /* failed to get the link up... give up */
3288 if (link_startup_again) {
3289 link_startup_again = false;
3290 retries = DME_LINKSTARTUP_RETRIES;
3294 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3295 ret = ufshcd_disable_device_tx_lcc(hba);
3300 /* Include any host controller configuration via UIC commands */
3301 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
3305 ret = ufshcd_make_hba_operational(hba);
3308 dev_err(hba->dev, "link startup failed %d\n", ret);
3313 * ufshcd_verify_dev_init() - Verify device initialization
3314 * @hba: per-adapter instance
3316 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
3317 * device Transport Protocol (UTP) layer is ready after a reset.
3318 * If the UTP layer at the device side is not initialized, it may
3319 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
3320 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
3322 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
3327 ufshcd_hold(hba, false);
3328 mutex_lock(&hba->dev_cmd.lock);
3329 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
3330 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
3333 if (!err || err == -ETIMEDOUT)
3336 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
3338 mutex_unlock(&hba->dev_cmd.lock);
3339 ufshcd_release(hba);
3342 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
3347 * ufshcd_set_queue_depth - set lun queue depth
3348 * @sdev: pointer to SCSI device
3350 * Read bLUQueueDepth value and activate scsi tagged command
3351 * queueing. For WLUN, queue depth is set to 1. For best-effort
3352 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
3353 * value that host can queue.
3355 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3359 struct ufs_hba *hba;
3361 hba = shost_priv(sdev->host);
3363 lun_qdepth = hba->nutrs;
3364 ret = ufshcd_read_unit_desc_param(hba,
3365 ufshcd_scsi_to_upiu_lun(sdev->lun),
3366 UNIT_DESC_PARAM_LU_Q_DEPTH,
3368 sizeof(lun_qdepth));
3370 /* Some WLUNs don't support unit descriptors */
3371 if (ret == -EOPNOTSUPP)
3373 else if (!lun_qdepth)
3374 /* eventually, we can figure out the real queue depth */
3375 lun_qdepth = hba->nutrs;
3377 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
3379 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
3380 __func__, lun_qdepth);
3381 scsi_change_queue_depth(sdev, lun_qdepth);
3385 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
3386 * @hba: per-adapter instance
3387 * @lun: UFS device lun id
3388 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
3390 * Returns 0 in case of success; the b_lu_write_protect status is returned
3391 * in the @b_lu_write_protect parameter.
3392 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
3393 * Returns -EINVAL in case of invalid parameters passed to this function.
3395 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
3397 u8 *b_lu_write_protect)
3401 if (!b_lu_write_protect)
3404 * According to UFS device spec, RPMB LU can't be write
3405 * protected so skip reading bLUWriteProtect parameter for
3406 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
3408 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
3411 ret = ufshcd_read_unit_desc_param(hba,
3413 UNIT_DESC_PARAM_LU_WR_PROTECT,
3415 sizeof(*b_lu_write_protect));
3420 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect status
3422 * @hba: per-adapter instance
3423 * @sdev: pointer to SCSI device
3426 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
3427 struct scsi_device *sdev)
3429 if (hba->dev_info.f_power_on_wp_en &&
3430 !hba->dev_info.is_lu_power_on_wp) {
3431 u8 b_lu_write_protect;
3433 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
3434 &b_lu_write_protect) &&
3435 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
3436 hba->dev_info.is_lu_power_on_wp = true;
3441 * ufshcd_slave_alloc - handle initial SCSI device configurations
3442 * @sdev: pointer to SCSI device
3446 static int ufshcd_slave_alloc(struct scsi_device *sdev)
3448 struct ufs_hba *hba;
3450 hba = shost_priv(sdev->host);
3452 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
3453 sdev->use_10_for_ms = 1;
3455 /* allow SCSI layer to restart the device in case of errors */
3456 sdev->allow_restart = 1;
3458 /* REPORT SUPPORTED OPERATION CODES is not supported */
3459 sdev->no_report_opcodes = 1;
3461 /* WRITE_SAME command is not supported */
3462 sdev->no_write_same = 1;
3464 ufshcd_set_queue_depth(sdev);
3466 ufshcd_get_lu_power_on_wp_status(hba, sdev);
3472 * ufshcd_change_queue_depth - change queue depth
3473 * @sdev: pointer to SCSI device
3474 * @depth: required depth to set
3476 * Change queue depth and make sure the max. limits are not crossed.
3478 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
3480 struct ufs_hba *hba = shost_priv(sdev->host);
3482 if (depth > hba->nutrs)
3484 return scsi_change_queue_depth(sdev, depth);
3488 * ufshcd_slave_configure - adjust SCSI device configurations
3489 * @sdev: pointer to SCSI device
3491 static int ufshcd_slave_configure(struct scsi_device *sdev)
3493 struct request_queue *q = sdev->request_queue;
3495 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
3496 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
3502 * ufshcd_slave_destroy - remove SCSI device configurations
3503 * @sdev: pointer to SCSI device
3505 static void ufshcd_slave_destroy(struct scsi_device *sdev)
3507 struct ufs_hba *hba;
3509 hba = shost_priv(sdev->host);
3510 /* Drop the reference as it won't be needed anymore */
3511 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3512 unsigned long flags;
3514 spin_lock_irqsave(hba->host->host_lock, flags);
3515 hba->sdev_ufs_device = NULL;
3516 spin_unlock_irqrestore(hba->host->host_lock, flags);
3521 * ufshcd_task_req_compl - handle task management request completion
3522 * @hba: per adapter instance
3523 * @index: index of the completed request
3524 * @resp: task management service response
3526 * Returns non-zero value on error, zero on success
3528 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
3530 struct utp_task_req_desc *task_req_descp;
3531 struct utp_upiu_task_rsp *task_rsp_upiup;
3532 unsigned long flags;
3536 spin_lock_irqsave(hba->host->host_lock, flags);
3538 /* Clear completed tasks from outstanding_tasks */
3539 __clear_bit(index, &hba->outstanding_tasks);
3541 task_req_descp = hba->utmrdl_base_addr;
3542 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3544 if (ocs_value == OCS_SUCCESS) {
3545 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3546 task_req_descp[index].task_rsp_upiu;
3547 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
3548 task_result = task_result & MASK_TM_SERVICE_RESP;
3550 *resp = (u8)task_result;
3552 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3553 __func__, ocs_value);
3555 spin_unlock_irqrestore(hba->host->host_lock, flags);
3561 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
3562 * @lrbp: pointer to local reference block of completed command
3563 * @scsi_status: SCSI command status
3565 * Returns value based on SCSI command status
3568 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3572 switch (scsi_status) {
3573 case SAM_STAT_CHECK_CONDITION:
3574 ufshcd_copy_sense_data(lrbp);
3576 result |= DID_OK << 16 |
3577 COMMAND_COMPLETE << 8 |
3580 case SAM_STAT_TASK_SET_FULL:
3582 case SAM_STAT_TASK_ABORTED:
3583 ufshcd_copy_sense_data(lrbp);
3584 result |= scsi_status;
3587 result |= DID_ERROR << 16;
3589 } /* end of switch */
3595 * ufshcd_transfer_rsp_status - Get overall status of the response
3596 * @hba: per adapter instance
3597 * @lrbp: pointer to local reference block of completed command
3599 * Returns result of the command to notify SCSI midlayer
3602 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3608 /* overall command status of utrd */
3609 ocs = ufshcd_get_tr_ocs(lrbp);
3613 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3616 case UPIU_TRANSACTION_RESPONSE:
3618 * get the response UPIU result to extract
3619 * the SCSI command status
3621 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3624 * get the result based on SCSI status response
3625 * to notify the SCSI midlayer of the command status
3627 scsi_status = result & MASK_SCSI_STATUS;
3628 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
3631 * Currently we are only supporting BKOPs exception
3632 * events hence we can ignore BKOPs exception event
3633 * during power management callbacks. BKOPs exception
3634 * event is not expected to be raised in runtime suspend
3635 * callback as it allows the urgent bkops.
3636 * During system suspend, we are anyway forcefully
3637 * disabling the bkops and if urgent bkops is needed
3638 * it will be enabled on system resume. Long term
3639 * solution could be to abort the system suspend if
3640 * UFS device needs urgent BKOPs.
3642 if (!hba->pm_op_in_progress &&
3643 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
3644 schedule_work(&hba->eeh_work);
3646 case UPIU_TRANSACTION_REJECT_UPIU:
3647 /* TODO: handle Reject UPIU Response */
3648 result = DID_ERROR << 16;
3650 "Reject UPIU not fully implemented\n");
3653 result = DID_ERROR << 16;
3655 "Unexpected request response code = %x\n",
3661 result |= DID_ABORT << 16;
3663 case OCS_INVALID_COMMAND_STATUS:
3664 result |= DID_REQUEUE << 16;
3666 case OCS_INVALID_CMD_TABLE_ATTR:
3667 case OCS_INVALID_PRDT_ATTR:
3668 case OCS_MISMATCH_DATA_BUF_SIZE:
3669 case OCS_MISMATCH_RESP_UPIU_SIZE:
3670 case OCS_PEER_COMM_FAILURE:
3671 case OCS_FATAL_ERROR:
3673 result |= DID_ERROR << 16;
3675 "OCS error from controller = %x\n", ocs);
3677 } /* end of switch */
3683 * ufshcd_uic_cmd_compl - handle completion of uic command
3684 * @hba: per adapter instance
3685 * @intr_status: interrupt status generated by the controller
3687 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
3689 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
3690 hba->active_uic_cmd->argument2 |=
3691 ufshcd_get_uic_cmd_result(hba);
3692 hba->active_uic_cmd->argument3 =
3693 ufshcd_get_dme_attr_val(hba);
3694 complete(&hba->active_uic_cmd->done);
3697 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3698 complete(hba->uic_async_done);
3702 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
3703 * @hba: per adapter instance
3704 * @completed_reqs: requests to complete
3706 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3707 unsigned long completed_reqs)
3709 struct ufshcd_lrb *lrbp;
3710 struct scsi_cmnd *cmd;
3714 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3715 lrbp = &hba->lrb[index];
3718 result = ufshcd_transfer_rsp_status(hba, lrbp);
3719 scsi_dma_unmap(cmd);
3720 cmd->result = result;
3721 /* Mark completed command as NULL in LRB */
3723 clear_bit_unlock(index, &hba->lrb_in_use);
3724 /* Do not touch lrbp after scsi done */
3725 cmd->scsi_done(cmd);
3726 __ufshcd_release(hba);
3727 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
3728 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
3729 if (hba->dev_cmd.complete)
3730 complete(hba->dev_cmd.complete);
3734 /* clear corresponding bits of completed commands */
3735 hba->outstanding_reqs ^= completed_reqs;
3737 ufshcd_clk_scaling_update_busy(hba);
3739 /* we might have freed some tags above */
3740 wake_up(&hba->dev_cmd.tag_wq);
3744 * ufshcd_transfer_req_compl - handle SCSI and query command completion
3745 * @hba: per adapter instance
3747 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3749 unsigned long completed_reqs;
3752 /* Resetting interrupt aggregation counters first and reading the
3753 * DOOR_BELL afterward allows us to handle all the completed requests.
3754 * In order to prevent other interrupts starvation the DB is read once
3755 * after reset. The down side of this solution is the possibility of
3756 * false interrupt if device completes another request after resetting
3757 * aggregation and before reading the DB.
3759 if (ufshcd_is_intr_aggr_allowed(hba))
3760 ufshcd_reset_intr_aggr(hba);
3762 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3763 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
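/*
 * Worked example of the XOR above (illustrative values): if
 * outstanding_reqs = 0b0111 (tags 0-2 issued) and the doorbell now
 * reads 0b0100 (tag 2 still owned by the controller), then
 * 0b0100 ^ 0b0111 = 0b0011, i.e. tags 0 and 1 have completed and are
 * passed down for completion handling.
 */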
3765 __ufshcd_transfer_req_compl(hba, completed_reqs);
3769 * ufshcd_disable_ee - disable exception event
3770 * @hba: per-adapter instance
3771 * @mask: exception event to disable
3773 * Disables exception event in the device so that the EVENT_ALERT bit is not set.
3776 * Returns zero on success, non-zero error value on failure.
3778 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3783 if (!(hba->ee_ctrl_mask & mask))
3786 val = hba->ee_ctrl_mask & ~mask;
3787 val &= 0xFFFF; /* 2 bytes */
3788 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3789 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3791 hba->ee_ctrl_mask &= ~mask;
3797 * ufshcd_enable_ee - enable exception event
3798 * @hba: per-adapter instance
3799 * @mask: exception event to enable
3801 * Enable corresponding exception event in the device to allow
3802 * device to alert host in critical scenarios.
3804 * Returns zero on success, non-zero error value on failure.
3806 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3811 if (hba->ee_ctrl_mask & mask)
3814 val = hba->ee_ctrl_mask | mask;
3815 val &= 0xFFFF; /* 2 bytes */
3816 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3817 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3819 hba->ee_ctrl_mask |= mask;
3825 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
3826 * @hba: per-adapter instance
3828 * Allow device to manage background operations on its own. Enabling
3829 * this might lead to inconsistent latencies during normal data transfers
3830 * as the device is allowed to manage background operations in its own way.
3833 * Returns zero on success, non-zero on failure.
3835 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3839 if (hba->auto_bkops_enabled)
3842 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3843 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3845 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3850 hba->auto_bkops_enabled = true;
3852 /* No need of URGENT_BKOPS exception from the device */
3853 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3855 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3862 * ufshcd_disable_auto_bkops - block device in doing background operations
3863 * @hba: per-adapter instance
3865 * Disabling background operations improves command response latency but
3866 * has the drawback of the device moving into a critical state where it is
3867 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
3868 * host is idle so that BKOPS are managed effectively without any negative impact.
3871 * Returns zero on success, non-zero on failure.
3873 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3877 if (!hba->auto_bkops_enabled)
3881 * If host assisted BKOPs is to be enabled, make sure
3882 * urgent bkops exception is allowed.
3884 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3886 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3891 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
3892 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3894 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3896 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3900 hba->auto_bkops_enabled = false;
3906 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
3907 * @hba: per adapter instance
3909 * After a device reset the device may toggle the BKOPS_EN flag
3910 * to default value. The s/w tracking variables should be updated
3911 * as well. This function would change the auto-bkops state based on
3912 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
3914 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3916 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
3917 hba->auto_bkops_enabled = false;
3918 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3919 ufshcd_enable_auto_bkops(hba);
3921 hba->auto_bkops_enabled = true;
3922 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
3923 ufshcd_disable_auto_bkops(hba);
3927 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3929 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3930 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
3934 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
3935 * @hba: per-adapter instance
3936 * @status: bkops_status value
3938 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
3939 * flag in the device to permit background operations if the device
3940 * bkops_status is greater than or equal to "status" argument passed to
3941 * this function, disable otherwise.
3943 * Returns 0 for success, non-zero in case of failure.
3945 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3946 * to know whether auto bkops is enabled or disabled after this function
3947 * returns control to it.
3949 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3950 enum bkops_status status)
3953 u32 curr_status = 0;
3955 err = ufshcd_get_bkops_status(hba, &curr_status);
3957 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3960 } else if (curr_status > BKOPS_STATUS_MAX) {
3961 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3962 __func__, curr_status);
3967 if (curr_status >= status)
3968 err = ufshcd_enable_auto_bkops(hba);
3970 err = ufshcd_disable_auto_bkops(hba);
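/*
 * Illustrative sketch: a resume path could keep BKOPS enabled only when
 * the device reports at least "performance impact" severity, using the
 * BKOPS_STATUS_PERF_IMPACT threshold referenced later in this file.
 * The function name is hypothetical.
 */
static int ufshcd_example_resume_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
}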
3976 * ufshcd_urgent_bkops - handle urgent bkops exception event
3977 * @hba: per-adapter instance
3979 * Enable fBackgroundOpsEn flag in the device to permit background operations.
3982 * If BKOPS is enabled, this function returns 0; 1 if BKOPS is not enabled;
3983 * and a negative error value for any other failure.
3985 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3987 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
3990 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3992 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3993 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
3996 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
3999 u32 curr_status = 0;
4001 if (hba->is_urgent_bkops_lvl_checked)
4002 goto enable_auto_bkops;
4004 err = ufshcd_get_bkops_status(hba, &curr_status);
4006 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4012 * We are seeing that some devices are raising the urgent bkops
4013 * exception events even when the BKOPS status doesn't indicate performance
4014 * impacted or critical. Handle these devices by determining their urgent
4015 * bkops status at runtime.
4017 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
4018 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
4019 __func__, curr_status);
4020 /* update the current status as the urgent bkops level */
4021 hba->urgent_bkops_lvl = curr_status;
4022 hba->is_urgent_bkops_lvl_checked = true;
4026 err = ufshcd_enable_auto_bkops(hba);
4029 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
4034 * ufshcd_exception_event_handler - handle exceptions raised by device
4035 * @work: pointer to work data
4037 * Read bExceptionEventStatus attribute from the device and handle the
4038 * exception event accordingly.
4040 static void ufshcd_exception_event_handler(struct work_struct *work)
4042 struct ufs_hba *hba;
4045 hba = container_of(work, struct ufs_hba, eeh_work);
4047 pm_runtime_get_sync(hba->dev);
4048 scsi_block_requests(hba->host);
4049 err = ufshcd_get_ee_status(hba, &status);
4051 dev_err(hba->dev, "%s: failed to get exception status %d\n",
4056 status &= hba->ee_ctrl_mask;
4058 if (status & MASK_EE_URGENT_BKOPS)
4059 ufshcd_bkops_exception_event_handler(hba);
4062 scsi_unblock_requests(hba->host);
4063 pm_runtime_put_sync(hba->dev);
4067 /* Complete requests that have door-bell cleared */
4068 static void ufshcd_complete_requests(struct ufs_hba *hba)
4070 ufshcd_transfer_req_compl(hba);
4071 ufshcd_tmc_handler(hba);
4075 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling is
4076 * required to recover from the DL NAC errors.
4077 * @hba: per-adapter instance
4079 * Returns true if error handling is required, false otherwise
4081 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
4083 unsigned long flags;
4084 bool err_handling = true;
4086 spin_lock_irqsave(hba->host->host_lock, flags);
4088 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
4089 * device fatal error and/or DL NAC & REPLAY timeout errors.
4091 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
4094 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
4095 ((hba->saved_err & UIC_ERROR) &&
4096 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
4099 if ((hba->saved_err & UIC_ERROR) &&
4100 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
4103 * wait for 50ms to see if we can get any other errors or not.
4105 spin_unlock_irqrestore(hba->host->host_lock, flags);
4107 spin_lock_irqsave(hba->host->host_lock, flags);
4110 * now check if we have got any other severe errors other than the DL NAC error.
4113 if ((hba->saved_err & INT_FATAL_ERRORS) ||
4114 ((hba->saved_err & UIC_ERROR) &&
4115 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
4119 * As DL NAC is the only error received so far, send out NOP
4120 * command to confirm if link is still active or not.
4121 * - If we don't get any response then do error recovery.
4122 * - If we get response then clear the DL NAC error bit.
4125 spin_unlock_irqrestore(hba->host->host_lock, flags);
4126 err = ufshcd_verify_dev_init(hba);
4127 spin_lock_irqsave(hba->host->host_lock, flags);
4132 /* Link seems to be alive hence ignore the DL NAC errors */
4133 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
4134 hba->saved_err &= ~UIC_ERROR;
4135 /* clear NAC error */
4136 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4137 if (!hba->saved_uic_err) {
4138 err_handling = false;
4143 spin_unlock_irqrestore(hba->host->host_lock, flags);
4144 return err_handling;
4148 * ufshcd_err_handler - handle UFS errors that require s/w attention
4149 * @work: pointer to work structure
4151 static void ufshcd_err_handler(struct work_struct *work)
4153 struct ufs_hba *hba;
4154 unsigned long flags;
4159 bool needs_reset = false;
4161 hba = container_of(work, struct ufs_hba, eh_work);
4163 pm_runtime_get_sync(hba->dev);
4164 ufshcd_hold(hba, false);
4166 spin_lock_irqsave(hba->host->host_lock, flags);
4167 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
4170 hba->ufshcd_state = UFSHCD_STATE_RESET;
4171 ufshcd_set_eh_in_progress(hba);
4173 /* Complete requests that have door-bell cleared by h/w */
4174 ufshcd_complete_requests(hba);
4176 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4179 spin_unlock_irqrestore(hba->host->host_lock, flags);
4180 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
4181 ret = ufshcd_quirk_dl_nac_errors(hba);
4182 spin_lock_irqsave(hba->host->host_lock, flags);
4184 goto skip_err_handling;
4186 if ((hba->saved_err & INT_FATAL_ERRORS) ||
4187 ((hba->saved_err & UIC_ERROR) &&
4188 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
4189 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
4190 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
4194 * if host reset is required then skip clearing the pending
4195 * transfers forcefully because they will automatically get
4196 * cleared after link startup.
4199 goto skip_pending_xfer_clear;
4201 /* release lock as clear command might sleep */
4202 spin_unlock_irqrestore(hba->host->host_lock, flags);
4203 /* Clear pending transfer requests */
4204 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
4205 if (ufshcd_clear_cmd(hba, tag)) {
4207 goto lock_skip_pending_xfer_clear;
4211 /* Clear pending task management requests */
4212 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
4213 if (ufshcd_clear_tm_cmd(hba, tag)) {
4215 goto lock_skip_pending_xfer_clear;
4219 lock_skip_pending_xfer_clear:
4220 spin_lock_irqsave(hba->host->host_lock, flags);
4222 /* Complete the requests that are cleared by s/w */
4223 ufshcd_complete_requests(hba);
4225 if (err_xfer || err_tm)
4228 skip_pending_xfer_clear:
4229 /* Fatal errors need reset */
4231 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
4234 * ufshcd_reset_and_restore() does the link reinitialization
4235 * which will need at least one empty doorbell slot to send the
4236 * device management commands (NOP and query commands).
4237 * If there is no slot empty at this moment then free up the last taken slot.
4240 if (hba->outstanding_reqs == max_doorbells)
4241 __ufshcd_transfer_req_compl(hba,
4242 (1UL << (hba->nutrs - 1)));
4244 spin_unlock_irqrestore(hba->host->host_lock, flags);
4245 err = ufshcd_reset_and_restore(hba);
4246 spin_lock_irqsave(hba->host->host_lock, flags);
4248 dev_err(hba->dev, "%s: reset and restore failed\n",
4250 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4253 * Inform scsi mid-layer that we did reset and allow to handle
4254 * Unit Attention properly.
4256 scsi_report_bus_reset(hba->host, 0);
4258 hba->saved_uic_err = 0;
4263 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4264 if (hba->saved_err || hba->saved_uic_err)
4265 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
4266 __func__, hba->saved_err, hba->saved_uic_err);
4269 ufshcd_clear_eh_in_progress(hba);
4272 spin_unlock_irqrestore(hba->host->host_lock, flags);
4273 scsi_unblock_requests(hba->host);
4274 ufshcd_release(hba);
4275 pm_runtime_put_sync(hba->dev);
4279 * ufshcd_update_uic_error - check and set fatal UIC error flags.
4280 * @hba: per-adapter instance
4282 static void ufshcd_update_uic_error(struct ufs_hba *hba)
4286 /* PA_INIT_ERROR is fatal and needs UIC reset */
4287 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
4288 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
4289 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
4290 else if (hba->dev_quirks &
4291 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4292 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
4294 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4295 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
4296 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
4299 /* UIC NL/TL/DME errors need software retry */
4300 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
4302 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
4304 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
4306 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
4308 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
4310 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
4312 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
4313 __func__, hba->uic_error);
4317 * ufshcd_check_errors - Check for errors that need s/w attention
4318 * @hba: per-adapter instance
4320 static void ufshcd_check_errors(struct ufs_hba *hba)
4322 bool queue_eh_work = false;
4324 if (hba->errors & INT_FATAL_ERRORS)
4325 queue_eh_work = true;
4327 if (hba->errors & UIC_ERROR) {
4329 ufshcd_update_uic_error(hba);
4331 queue_eh_work = true;
4334 if (queue_eh_work) {
4336 * update the transfer error masks to sticky bits, let's do this
4337 * irrespective of current ufshcd_state.
4339 hba->saved_err |= hba->errors;
4340 hba->saved_uic_err |= hba->uic_error;
4342 /* handle fatal errors only when link is functional */
4343 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
4344 /* block commands from scsi mid-layer */
4345 scsi_block_requests(hba->host);
4347 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
4348 schedule_work(&hba->eh_work);
4352 * if (!queue_eh_work) -
4353 * Other errors are either non-fatal where host recovers
4354 * itself without s/w intervention or errors that will be
4355 * handled by the SCSI core layer.
4360 * ufshcd_tmc_handler - handle task management function completion
4361 * @hba: per adapter instance
4363 static void ufshcd_tmc_handler(struct ufs_hba *hba)
4367 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
4368 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
4369 wake_up(&hba->tm_wq);
4373 * ufshcd_sl_intr - Interrupt service routine
4374 * @hba: per adapter instance
4375 * @intr_status: contains interrupts generated by the controller
4377 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
4379 hba->errors = UFSHCD_ERROR_MASK & intr_status;
4381 ufshcd_check_errors(hba);
4383 if (intr_status & UFSHCD_UIC_MASK)
4384 ufshcd_uic_cmd_compl(hba, intr_status);
4386 if (intr_status & UTP_TASK_REQ_COMPL)
4387 ufshcd_tmc_handler(hba);
4389 if (intr_status & UTP_TRANSFER_REQ_COMPL)
4390 ufshcd_transfer_req_compl(hba);
4394 * ufshcd_intr - Main interrupt service routine
4396 * @__hba: pointer to adapter instance
4398 * Returns IRQ_HANDLED - If interrupt is valid
4399 * IRQ_NONE - If invalid interrupt
4401 static irqreturn_t ufshcd_intr(int irq, void *__hba)
4403 u32 intr_status, enabled_intr_status = 0;
4404 irqreturn_t retval = IRQ_NONE;
4405 struct ufs_hba *hba = __hba;
4406 int retries = hba->nutrs;
4408 spin_lock(hba->host->host_lock);
4409 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
4412 	 * There can be at most hba->nutrs requests in flight; in the worst
4413 	 * case, if they finish one by one after the interrupt status is
4414 	 * read, make sure we handle them all by re-reading the interrupt
4415 	 * status in a loop until every request is processed before returning.
4417 while (intr_status && retries--) {
4418 enabled_intr_status =
4419 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
4421 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
4422 if (enabled_intr_status) {
4423 ufshcd_sl_intr(hba, enabled_intr_status);
4424 retval = IRQ_HANDLED;
4427 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
4430 spin_unlock(hba->host->host_lock);
4434 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
4437 u32 mask = 1 << tag;
4438 unsigned long flags;
4440 if (!test_bit(tag, &hba->outstanding_tasks))
4443 spin_lock_irqsave(hba->host->host_lock, flags);
4444 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
4445 spin_unlock_irqrestore(hba->host->host_lock, flags);
4447 	/* poll for max. 1 sec for the h/w to clear the doorbell register */
4448 err = ufshcd_wait_for_register(hba,
4449 REG_UTP_TASK_REQ_DOOR_BELL,
4450 mask, 0, 1000, 1000, true);
4456 * ufshcd_issue_tm_cmd - issues task management commands to controller
4457 * @hba: per adapter instance
4458 * @lun_id: LUN ID to which TM command is sent
4459 * @task_id: task ID to which the TM command is applicable
4460 * @tm_function: task management function opcode
4461 * @tm_response: task management service response return value
4463 * Returns non-zero value on error, zero on success.
4465 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4466 u8 tm_function, u8 *tm_response)
4468 struct utp_task_req_desc *task_req_descp;
4469 struct utp_upiu_task_req *task_req_upiup;
4470 struct Scsi_Host *host;
4471 unsigned long flags;
4479 * Get free slot, sleep if slots are unavailable.
4480 * Even though we use wait_event() which sleeps indefinitely,
4481 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
4483 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
4484 ufshcd_hold(hba, false);
4486 spin_lock_irqsave(host->host_lock, flags);
4487 task_req_descp = hba->utmrdl_base_addr;
4488 task_req_descp += free_slot;
4490 /* Configure task request descriptor */
4491 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
4492 task_req_descp->header.dword_2 =
4493 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
4495 /* Configure task request UPIU */
4497 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
4498 task_tag = hba->nutrs + free_slot;
4499 task_req_upiup->header.dword_0 =
4500 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
4502 task_req_upiup->header.dword_1 =
4503 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
4505 	 * The host shall provide the same value for the LUN field in the basic
4506 	 * header and for Input Parameter 1.
4508 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
4509 task_req_upiup->input_param2 = cpu_to_be32(task_id);
4511 /* send command to the controller */
4512 __set_bit(free_slot, &hba->outstanding_tasks);
4514 /* Make sure descriptors are ready before ringing the task doorbell */
4517 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
4519 spin_unlock_irqrestore(host->host_lock, flags);
4521 /* wait until the task management command is completed */
4522 err = wait_event_timeout(hba->tm_wq,
4523 test_bit(free_slot, &hba->tm_condition),
4524 msecs_to_jiffies(TM_CMD_TIMEOUT));
4526 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
4527 __func__, tm_function);
4528 if (ufshcd_clear_tm_cmd(hba, free_slot))
4529 			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
4530 __func__, free_slot);
4533 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
4536 clear_bit(free_slot, &hba->tm_condition);
4537 ufshcd_put_tm_slot(hba, free_slot);
4538 wake_up(&hba->tm_tag_wq);
4540 ufshcd_release(hba);
4545 * ufshcd_eh_device_reset_handler - device reset handler registered to
4547 * @cmd: SCSI command pointer
4549 * Returns SUCCESS/FAILED
4551 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
4553 struct Scsi_Host *host;
4554 struct ufs_hba *hba;
4558 unsigned long flags;
4560 host = cmd->device->host;
4561 hba = shost_priv(host);
4563 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
4564 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
4565 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4571 /* clear the commands that were pending for corresponding LUN */
4572 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
4573 if (hba->lrb[pos].lun == lun) {
4574 err = ufshcd_clear_cmd(hba, pos);
4579 spin_lock_irqsave(host->host_lock, flags);
4580 ufshcd_transfer_req_compl(hba);
4581 spin_unlock_irqrestore(host->host_lock, flags);
4586 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4593 * ufshcd_abort - abort a specific command
4594 * @cmd: SCSI command pointer
4596  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
4597  * management command, and in the host controller by clearing the doorbell
4598  * register. There can be a race where the controller sends the command to the
4599  * device while the abort is being issued. To avoid that, first issue
4600  * UFS_QUERY_TASK to check if the command was really dispatched, and only then try to abort it.
4602 * Returns SUCCESS/FAILED
4604 static int ufshcd_abort(struct scsi_cmnd *cmd)
4606 struct Scsi_Host *host;
4607 struct ufs_hba *hba;
4608 unsigned long flags;
4613 struct ufshcd_lrb *lrbp;
4616 host = cmd->device->host;
4617 hba = shost_priv(host);
4618 tag = cmd->request->tag;
4619 if (!ufshcd_valid_tag(hba, tag)) {
4621 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
4622 __func__, tag, cmd, cmd->request);
4626 ufshcd_hold(hba, false);
4627 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4628 /* If command is already aborted/completed, return SUCCESS */
4629 if (!(test_bit(tag, &hba->outstanding_reqs))) {
4631 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
4632 __func__, tag, hba->outstanding_reqs, reg);
4636 if (!(reg & (1 << tag))) {
4638 "%s: cmd was completed, but without a notifying intr, tag = %d",
4642 lrbp = &hba->lrb[tag];
4643 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
4644 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4645 UFS_QUERY_TASK, &resp);
4646 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
4647 /* cmd pending in the device */
4649 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4651 * cmd not pending in the device, check if it is
4654 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4655 if (reg & (1 << tag)) {
4656 /* sleep for max. 200us to stabilize */
4657 usleep_range(100, 200);
4660 /* command completed already */
4664 err = resp; /* service response error */
4674 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4675 UFS_ABORT_TASK, &resp);
4676 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4678 err = resp; /* service response error */
4682 err = ufshcd_clear_cmd(hba, tag);
4686 scsi_dma_unmap(cmd);
4688 spin_lock_irqsave(host->host_lock, flags);
4689 ufshcd_outstanding_req_clear(hba, tag);
4690 hba->lrb[tag].cmd = NULL;
4691 spin_unlock_irqrestore(host->host_lock, flags);
4693 clear_bit_unlock(tag, &hba->lrb_in_use);
4694 wake_up(&hba->dev_cmd.tag_wq);
4700 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4705 * This ufshcd_release() corresponds to the original scsi cmd that got
4706 * aborted here (as we won't get any IRQ for it).
4708 ufshcd_release(hba);
4713 * ufshcd_host_reset_and_restore - reset and restore host controller
4714 * @hba: per-adapter instance
4716 * Note that host controller reset may issue DME_RESET to
4717  * local and remote (device) UniPro stacks and the attributes
4718 * are reset to default state.
4720 * Returns zero on success, non-zero on failure
4722 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4725 unsigned long flags;
4727 /* Reset the host controller */
4728 spin_lock_irqsave(hba->host->host_lock, flags);
4729 ufshcd_hba_stop(hba, false);
4730 spin_unlock_irqrestore(hba->host->host_lock, flags);
4732 err = ufshcd_hba_enable(hba);
4736 /* Establish the link again and restore the device */
4737 err = ufshcd_probe_hba(hba);
4739 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
4743 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
4749 * ufshcd_reset_and_restore - reset and re-initialize host/device
4750 * @hba: per-adapter instance
4752 * Reset and recover device, host and re-establish link. This
4753 * is helpful to recover the communication in fatal error conditions.
4755 * Returns zero on success, non-zero on failure
4757 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
4760 unsigned long flags;
4761 int retries = MAX_HOST_RESET_RETRIES;
4764 err = ufshcd_host_reset_and_restore(hba);
4765 } while (err && --retries);
4768 * After reset the door-bell might be cleared, complete
4769 * outstanding requests in s/w here.
4771 spin_lock_irqsave(hba->host->host_lock, flags);
4772 ufshcd_transfer_req_compl(hba);
4773 ufshcd_tmc_handler(hba);
4774 spin_unlock_irqrestore(hba->host->host_lock, flags);
4780 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
4781  * @cmd: SCSI command pointer
4783 * Returns SUCCESS/FAILED
4785 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
4788 unsigned long flags;
4789 struct ufs_hba *hba;
4791 hba = shost_priv(cmd->device->host);
4793 ufshcd_hold(hba, false);
4795 * Check if there is any race with fatal error handling.
4796 * If so, wait for it to complete. Even though fatal error
4797 * handling does reset and restore in some cases, don't assume
4798 * anything out of it. We are just avoiding race here.
4801 spin_lock_irqsave(hba->host->host_lock, flags);
4802 if (!(work_pending(&hba->eh_work) ||
4803 hba->ufshcd_state == UFSHCD_STATE_RESET))
4805 spin_unlock_irqrestore(hba->host->host_lock, flags);
4806 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4807 flush_work(&hba->eh_work);
4810 hba->ufshcd_state = UFSHCD_STATE_RESET;
4811 ufshcd_set_eh_in_progress(hba);
4812 spin_unlock_irqrestore(hba->host->host_lock, flags);
4814 err = ufshcd_reset_and_restore(hba);
4816 spin_lock_irqsave(hba->host->host_lock, flags);
4819 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4822 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4824 ufshcd_clear_eh_in_progress(hba);
4825 spin_unlock_irqrestore(hba->host->host_lock, flags);
4827 ufshcd_release(hba);
4832 * ufshcd_get_max_icc_level - calculate the ICC level
4833 * @sup_curr_uA: max. current supported by the regulator
4834  * @start_scan: row in the descriptor table to start the scan from
4835 * @buff: power descriptor buffer
4837 * Returns calculated max ICC level for specific regulator
4839 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
4846 for (i = start_scan; i >= 0; i--) {
4847 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
4848 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
4849 ATTR_ICC_LVL_UNIT_OFFSET;
4850 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
4852 case UFSHCD_NANO_AMP:
4853 curr_uA = curr_uA / 1000;
4855 case UFSHCD_MILI_AMP:
4856 curr_uA = curr_uA * 1000;
4859 curr_uA = curr_uA * 1000 * 1000;
4861 case UFSHCD_MICRO_AMP:
4865 if (sup_curr_uA >= curr_uA)
4870 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
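/*
 * Editor's sketch (illustration, not driver code): how a single 16-bit entry
 * of the power descriptor's active ICC level table decodes into microamps,
 * mirroring the switch above. icc_entry_to_uA() is a hypothetical helper
 * name; the unit encodings are inferred from the conversions visible in
 * ufshcd_get_max_icc_level() and should be checked against the
 * UFSHCD_*_AMP definitions.
 */
static inline u32 icc_entry_to_uA(u16 data)
{
	u32 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET;
	u32 curr = data & ATTR_ICC_LVL_VALUE_MASK;

	switch (unit) {
	case UFSHCD_NANO_AMP:		/* value given in nA */
		return curr / 1000;
	case UFSHCD_MILI_AMP:		/* value given in mA */
		return curr * 1000;
	case UFSHCD_MICRO_AMP:		/* value already in uA */
		return curr;
	default:			/* assumed: remaining encoding is whole amperes */
		return curr * 1000 * 1000;
	}
}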
4877  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
4878  * In case regulators are not initialized we'll return 0
4879  * @hba: per-adapter instance
4880  * @desc_buf: power descriptor buffer to extract ICC levels from.
4881  * @len: length of desc_buf
4883 * Returns calculated ICC level
4885 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4886 u8 *desc_buf, int len)
4890 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4891 !hba->vreg_info.vccq2) {
4893 "%s: Regulator capability was not set, actvIccLevel=%d",
4894 __func__, icc_level);
4898 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
4899 icc_level = ufshcd_get_max_icc_level(
4900 hba->vreg_info.vcc->max_uA,
4901 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4902 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4904 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
4905 icc_level = ufshcd_get_max_icc_level(
4906 hba->vreg_info.vccq->max_uA,
4908 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4910 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
4911 icc_level = ufshcd_get_max_icc_level(
4912 hba->vreg_info.vccq2->max_uA,
4914 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4919 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4922 int buff_len = hba->desc_size.pwr_desc;
4923 u8 desc_buf[hba->desc_size.pwr_desc];
4925 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4928 "%s: Failed reading power descriptor.len = %d ret = %d",
4929 __func__, buff_len, ret);
4933 hba->init_prefetch_data.icc_level =
4934 ufshcd_find_max_sup_active_icc_level(hba,
4935 desc_buf, buff_len);
4936 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4937 __func__, hba->init_prefetch_data.icc_level);
4939 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4940 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4941 &hba->init_prefetch_data.icc_level);
4945 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4946 			__func__, hba->init_prefetch_data.icc_level, ret);
4951 * ufshcd_scsi_add_wlus - Adds required W-LUs
4952 * @hba: per-adapter instance
4954 * UFS device specification requires the UFS devices to support 4 well known
4956 * "REPORT_LUNS" (address: 01h)
4957 * "UFS Device" (address: 50h)
4958 * "RPMB" (address: 44h)
4959 * "BOOT" (address: 30h)
4960 * UFS device's power management needs to be controlled by "POWER CONDITION"
4961 * field of SSU (START STOP UNIT) command. But this "power condition" field
4962  * will take effect only when it's sent to the "UFS device" well known logical unit,
4963  * hence we require the scsi_device instance to represent this logical unit in
4964 * order for the UFS host driver to send the SSU command for power management.
4966 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
4967  * Block) LU so a user space process can control this LU. User space may also
4968 * want to have access to BOOT LU.
4970  * This function adds scsi device instances for each of the well known LUs
4971 * (except "REPORT LUNS" LU).
4973 * Returns zero on success (all required W-LUs are added successfully),
4974 * non-zero error value on failure (if failed to add any of the required W-LU).
4976 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4979 struct scsi_device *sdev_rpmb;
4980 struct scsi_device *sdev_boot;
4982 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4983 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4984 if (IS_ERR(hba->sdev_ufs_device)) {
4985 ret = PTR_ERR(hba->sdev_ufs_device);
4986 hba->sdev_ufs_device = NULL;
4989 scsi_device_put(hba->sdev_ufs_device);
4991 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4992 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4993 if (IS_ERR(sdev_boot)) {
4994 ret = PTR_ERR(sdev_boot);
4995 goto remove_sdev_ufs_device;
4997 scsi_device_put(sdev_boot);
4999 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
5000 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
5001 if (IS_ERR(sdev_rpmb)) {
5002 ret = PTR_ERR(sdev_rpmb);
5003 goto remove_sdev_boot;
5005 scsi_device_put(sdev_rpmb);
5009 scsi_remove_device(sdev_boot);
5010 remove_sdev_ufs_device:
5011 scsi_remove_device(hba->sdev_ufs_device);
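/*
 * Editor's note: ufshcd_upiu_wlun_to_scsi_wlun() above maps a UFS UPIU well
 * known LUN into the SCSI W-LUN range. Assuming SCSI_W_LUN_BASE is 0xc100
 * and UFS_UPIU_WLUN_ID is bit 7 (values this editor believes the headers
 * define; verify before relying on them), the mapping works out to, e.g.:
 *
 *   UFS Device W-LUN 0xd0  ->  (0xd0 & ~0x80) | 0xc100  =  0xc150
 *   RPMB       W-LUN 0xc4  ->  (0xc4 & ~0x80) | 0xc100  =  0xc144
 *   BOOT       W-LUN 0xb0  ->  (0xb0 & ~0x80) | 0xc100  =  0xc130
 */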
5016 static int ufs_get_device_desc(struct ufs_hba *hba,
5017 struct ufs_dev_desc *dev_desc)
5021 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
5022 u8 desc_buf[hba->desc_size.dev_desc];
5024 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
5026 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
5032 * getting vendor (manufacturerID) and Bank Index in big endian
5035 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
5036 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
5038 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
5040 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
5041 QUERY_DESC_MAX_SIZE, ASCII_STD);
5043 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
5048 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
5049 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
5050 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
5053 /* Null terminate the model string */
5054 dev_desc->model[MAX_MODEL_LEN] = '\0';
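/*
 * Editor's note: the manufacturer ID assembly above is an open-coded
 * big-endian 16-bit read of two consecutive descriptor bytes. A minimal
 * equivalent sketch (ufs_desc_be16() is a hypothetical helper added purely
 * for illustration):
 */
static inline u16 ufs_desc_be16(const u8 *desc_buf, int offset)
{
	/* same as desc_buf[offset] << 8 | desc_buf[offset + 1] */
	return (u16)desc_buf[offset] << 8 | desc_buf[offset + 1];
}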
5060 static void ufs_fixup_device_setup(struct ufs_hba *hba,
5061 struct ufs_dev_desc *dev_desc)
5063 struct ufs_dev_fix *f;
5065 for (f = ufs_fixups; f->quirk; f++) {
5066 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
5067 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
5068 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
5069 !strcmp(f->card.model, UFS_ANY_MODEL)))
5070 hba->dev_quirks |= f->quirk;
5075 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
5076 * @hba: per-adapter instance
5078 * PA_TActivate parameter can be tuned manually if UniPro version is less than
5079  * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
5080 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
5081 * the hibern8 exit latency.
5083 * Returns zero on success, non-zero error value on failure.
5085 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
5088 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
5090 ret = ufshcd_dme_peer_get(hba,
5092 RX_MIN_ACTIVATETIME_CAPABILITY,
5093 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
5094 &peer_rx_min_activatetime);
5098 /* make sure proper unit conversion is applied */
5099 tuned_pa_tactivate =
5100 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
5101 / PA_TACTIVATE_TIME_UNIT_US);
5102 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
5103 tuned_pa_tactivate);
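/*
 * Editor's worked example for the conversion above, assuming
 * RX_MIN_ACTIVATETIME_UNIT_US = 100 and PA_TACTIVATE_TIME_UNIT_US = 10
 * (the unit values this editor believes unipro.h defines; treat them as
 * assumptions): a peer RX_MIN_ACTIVATETIME_CAPABILITY of 2 means
 * 2 * 100 = 200 us, which is programmed as
 * PA_TACTIVATE = 200 / 10 = 20 in 10 us units.
 */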
5110 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
5111 * @hba: per-adapter instance
5113 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
5114  * 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
5115 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
5116 * This optimal value can help reduce the hibern8 exit latency.
5118 * Returns zero on success, non-zero error value on failure.
5120 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
5123 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
5124 u32 max_hibern8_time, tuned_pa_hibern8time;
5126 ret = ufshcd_dme_get(hba,
5127 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
5128 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
5129 &local_tx_hibern8_time_cap);
5133 ret = ufshcd_dme_peer_get(hba,
5134 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
5135 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
5136 &peer_rx_hibern8_time_cap);
5140 max_hibern8_time = max(local_tx_hibern8_time_cap,
5141 peer_rx_hibern8_time_cap);
5142 /* make sure proper unit conversion is applied */
5143 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
5144 / PA_HIBERN8_TIME_UNIT_US);
5145 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
5146 tuned_pa_hibern8time);
5152 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
5153 * less than device PA_TACTIVATE time.
5154 * @hba: per-adapter instance
5156 * Some UFS devices require host PA_TACTIVATE to be lower than device
5157  * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
5160 * Returns zero on success, non-zero error value on failure.
5162 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
5165 u32 granularity, peer_granularity;
5166 u32 pa_tactivate, peer_pa_tactivate;
5167 u32 pa_tactivate_us, peer_pa_tactivate_us;
5168 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
5170 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5175 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5180 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
5181 (granularity > PA_GRANULARITY_MAX_VAL)) {
5182 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
5183 __func__, granularity);
5187 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
5188 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
5189 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
5190 __func__, peer_granularity);
5194 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
5198 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
5199 &peer_pa_tactivate);
5203 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
5204 peer_pa_tactivate_us = peer_pa_tactivate *
5205 gran_to_us_table[peer_granularity - 1];
5207 if (pa_tactivate_us > peer_pa_tactivate_us) {
5208 u32 new_peer_pa_tactivate;
5210 new_peer_pa_tactivate = pa_tactivate_us /
5211 gran_to_us_table[peer_granularity - 1];
5212 new_peer_pa_tactivate++;
5213 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
5214 new_peer_pa_tactivate);
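/*
 * Editor's sketch of the granularity conversion used above: PA_GRANULARITY
 * selects the PA_TACTIVATE time unit (1 -> 1 us, 2 -> 4 us, 3 -> 8 us,
 * 4 -> 16 us, 5 -> 32 us, 6 -> 100 us, per gran_to_us_table in the function
 * above). pa_tactivate_to_us() is a hypothetical helper, added purely for
 * illustration.
 */
static inline u32 pa_tactivate_to_us(u32 pa_tactivate, u32 granularity)
{
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	/* granularity is 1-based: PA_GRANULARITY_MIN_VAL..PA_GRANULARITY_MAX_VAL */
	return pa_tactivate * gran_to_us_table[granularity - 1];
}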
5221 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
5223 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
5224 ufshcd_tune_pa_tactivate(hba);
5225 ufshcd_tune_pa_hibern8time(hba);
5228 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
5229 /* set 1ms timeout for PA_TACTIVATE */
5230 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
5232 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
5233 ufshcd_quirk_tune_host_pa_tactivate(hba);
5235 ufshcd_vops_apply_dev_quirks(hba);
5238 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
5242 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
5243 &hba->desc_size.dev_desc);
5245 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
5247 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
5248 &hba->desc_size.pwr_desc);
5250 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
5252 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
5253 &hba->desc_size.interc_desc);
5255 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
5257 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
5258 &hba->desc_size.conf_desc);
5260 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
5262 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
5263 &hba->desc_size.unit_desc);
5265 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
5267 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
5268 &hba->desc_size.geom_desc);
5270 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
5273 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
5275 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
5276 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
5277 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
5278 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
5279 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
5280 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
5284 * ufshcd_probe_hba - probe hba to detect device and initialize
5285 * @hba: per-adapter instance
5287 * Execute link-startup and verify device initialization
5289 static int ufshcd_probe_hba(struct ufs_hba *hba)
5291 struct ufs_dev_desc card = {0};
5294 ret = ufshcd_link_startup(hba);
5298 ufshcd_init_pwr_info(hba);
5300 /* set the default level for urgent bkops */
5301 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5302 hba->is_urgent_bkops_lvl_checked = false;
5304 /* UniPro link is active now */
5305 ufshcd_set_link_active(hba);
5307 ret = ufshcd_verify_dev_init(hba);
5311 ret = ufshcd_complete_dev_init(hba);
5315 /* Init check for device descriptor sizes */
5316 ufshcd_init_desc_sizes(hba);
5318 ret = ufs_get_device_desc(hba, &card);
5320 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
5325 ufs_fixup_device_setup(hba, &card);
5326 ufshcd_tune_unipro_params(hba);
5328 ret = ufshcd_set_vccq_rail_unused(hba,
5329 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5333 /* UFS device is also active now */
5334 ufshcd_set_ufs_dev_active(hba);
5335 ufshcd_force_reset_auto_bkops(hba);
5336 hba->wlun_dev_clr_ua = true;
5338 if (ufshcd_get_max_pwr_mode(hba)) {
5340 "%s: Failed getting max supported power mode\n",
5343 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
5345 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5349 /* set the state as operational after switching to desired gear */
5350 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5353 * If we are in error handling context or in power management callbacks
5354 * context, no need to scan the host
5356 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5359 /* clear any previous UFS device information */
5360 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
5361 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
5362 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
5363 hba->dev_info.f_power_on_wp_en = flag;
5365 if (!hba->is_init_prefetch)
5366 ufshcd_init_icc_levels(hba);
5368 /* Add required well known logical units to scsi mid layer */
5369 ret = ufshcd_scsi_add_wlus(hba);
5373 scsi_scan_host(hba->host);
5374 pm_runtime_put_sync(hba->dev);
5377 if (!hba->is_init_prefetch)
5378 hba->is_init_prefetch = true;
5380 /* Resume devfreq after UFS device is detected */
5381 if (ufshcd_is_clkscaling_enabled(hba))
5382 devfreq_resume_device(hba->devfreq);
5386 * If we failed to initialize the device or the device is not
5387 * present, turn off the power/clocks etc.
5389 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5390 pm_runtime_put_sync(hba->dev);
5391 ufshcd_hba_exit(hba);
5398 * ufshcd_async_scan - asynchronous execution for probing hba
5399 * @data: data pointer to pass to this function
5400 * @cookie: cookie data
5402 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
5404 struct ufs_hba *hba = (struct ufs_hba *)data;
5406 ufshcd_probe_hba(hba);
5409 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5411 unsigned long flags;
5412 struct Scsi_Host *host;
5413 struct ufs_hba *hba;
5417 if (!scmd || !scmd->device || !scmd->device->host)
5418 return BLK_EH_NOT_HANDLED;
5420 host = scmd->device->host;
5421 hba = shost_priv(host);
5423 return BLK_EH_NOT_HANDLED;
5425 spin_lock_irqsave(host->host_lock, flags);
5427 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5428 if (hba->lrb[index].cmd == scmd) {
5434 spin_unlock_irqrestore(host->host_lock, flags);
5437 * Bypass SCSI error handling and reset the block layer timer if this
5438 * SCSI command was not actually dispatched to UFS driver, otherwise
5439 * let SCSI layer handle the error as usual.
5441 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5444 static struct scsi_host_template ufshcd_driver_template = {
5445 .module = THIS_MODULE,
5447 .proc_name = UFSHCD,
5448 .queuecommand = ufshcd_queuecommand,
5449 .slave_alloc = ufshcd_slave_alloc,
5450 .slave_configure = ufshcd_slave_configure,
5451 .slave_destroy = ufshcd_slave_destroy,
5452 .change_queue_depth = ufshcd_change_queue_depth,
5453 .eh_abort_handler = ufshcd_abort,
5454 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
5455 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
5456 .eh_timed_out = ufshcd_eh_timed_out,
5458 .sg_tablesize = SG_ALL,
5459 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
5460 .can_queue = UFSHCD_CAN_QUEUE,
5461 .max_host_blocked = 1,
5462 .track_queue_depth = 1,
5465 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
5474 * "set_load" operation shall be required on those regulators
5475 * which specifically configured current limitation. Otherwise
5476 * zero max_uA may cause unexpected behavior when regulator is
5477 * enabled or set as high power mode.
5482 ret = regulator_set_load(vreg->reg, ua);
5484 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
5485 __func__, vreg->name, ua, ret);
5491 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
5492 struct ufs_vreg *vreg)
5496 else if (vreg->unused)
5499 return ufshcd_config_vreg_load(hba->dev, vreg,
5500 UFS_VREG_LPM_LOAD_UA);
5503 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
5504 struct ufs_vreg *vreg)
5508 else if (vreg->unused)
5511 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
5514 static int ufshcd_config_vreg(struct device *dev,
5515 struct ufs_vreg *vreg, bool on)
5518 struct regulator *reg;
5520 int min_uV, uA_load;
5527 if (regulator_count_voltages(reg) > 0) {
5528 if (vreg->min_uV && vreg->max_uV) {
5529 min_uV = on ? vreg->min_uV : 0;
5530 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
5533 "%s: %s set voltage failed, err=%d\n",
5534 __func__, name, ret);
5539 uA_load = on ? vreg->max_uA : 0;
5540 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
5548 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
5554 else if (vreg->enabled || vreg->unused)
5557 ret = ufshcd_config_vreg(dev, vreg, true);
5559 ret = regulator_enable(vreg->reg);
5562 vreg->enabled = true;
5564 dev_err(dev, "%s: %s enable failed, err=%d\n",
5565 __func__, vreg->name, ret);
5570 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
5576 else if (!vreg->enabled || vreg->unused)
5579 ret = regulator_disable(vreg->reg);
5582 /* ignore errors on applying disable config */
5583 ufshcd_config_vreg(dev, vreg, false);
5584 vreg->enabled = false;
5586 dev_err(dev, "%s: %s disable failed, err=%d\n",
5587 __func__, vreg->name, ret);
5593 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
5596 struct device *dev = hba->dev;
5597 struct ufs_vreg_info *info = &hba->vreg_info;
5602 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
5606 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
5610 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
5616 ufshcd_toggle_vreg(dev, info->vccq2, false);
5617 ufshcd_toggle_vreg(dev, info->vccq, false);
5618 ufshcd_toggle_vreg(dev, info->vcc, false);
5623 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
5625 struct ufs_vreg_info *info = &hba->vreg_info;
5628 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
5633 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
5640 vreg->reg = devm_regulator_get(dev, vreg->name);
5641 if (IS_ERR(vreg->reg)) {
5642 ret = PTR_ERR(vreg->reg);
5643 dev_err(dev, "%s: %s get failed, err=%d\n",
5644 __func__, vreg->name, ret);
5650 static int ufshcd_init_vreg(struct ufs_hba *hba)
5653 struct device *dev = hba->dev;
5654 struct ufs_vreg_info *info = &hba->vreg_info;
5659 ret = ufshcd_get_vreg(dev, info->vcc);
5663 ret = ufshcd_get_vreg(dev, info->vccq);
5667 ret = ufshcd_get_vreg(dev, info->vccq2);
5672 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
5674 struct ufs_vreg_info *info = &hba->vreg_info;
5677 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
5682 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
5685 struct ufs_vreg_info *info = &hba->vreg_info;
5689 else if (!info->vccq)
5693 /* shut off the rail here */
5694 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
5696 * Mark this rail as no longer used, so it doesn't get enabled
5700 info->vccq->unused = true;
5703 * rail should have been already enabled hence just make sure
5704 * that unused flag is cleared.
5706 info->vccq->unused = false;
5712 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5716 struct ufs_clk_info *clki;
5717 struct list_head *head = &hba->clk_list_head;
5718 unsigned long flags;
5720 if (!head || list_empty(head))
5723 list_for_each_entry(clki, head, list) {
5724 if (!IS_ERR_OR_NULL(clki->clk)) {
5725 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
5728 if (on && !clki->enabled) {
5729 ret = clk_prepare_enable(clki->clk);
5731 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
5732 __func__, clki->name, ret);
5735 } else if (!on && clki->enabled) {
5736 clk_disable_unprepare(clki->clk);
5739 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
5740 clki->name, on ? "en" : "dis");
5744 ret = ufshcd_vops_setup_clocks(hba, on);
5747 list_for_each_entry(clki, head, list) {
5748 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
5749 clk_disable_unprepare(clki->clk);
5752 spin_lock_irqsave(hba->host->host_lock, flags);
5753 hba->clk_gating.state = CLKS_ON;
5754 spin_unlock_irqrestore(hba->host->host_lock, flags);
5759 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
5761 return __ufshcd_setup_clocks(hba, on, false);
5764 static int ufshcd_init_clocks(struct ufs_hba *hba)
5767 struct ufs_clk_info *clki;
5768 struct device *dev = hba->dev;
5769 struct list_head *head = &hba->clk_list_head;
5771 if (!head || list_empty(head))
5774 list_for_each_entry(clki, head, list) {
5778 clki->clk = devm_clk_get(dev, clki->name);
5779 if (IS_ERR(clki->clk)) {
5780 ret = PTR_ERR(clki->clk);
5781 dev_err(dev, "%s: %s clk get failed, %d\n",
5782 __func__, clki->name, ret);
5786 if (clki->max_freq) {
5787 ret = clk_set_rate(clki->clk, clki->max_freq);
5789 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5790 __func__, clki->name,
5791 clki->max_freq, ret);
5794 clki->curr_freq = clki->max_freq;
5796 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
5797 clki->name, clk_get_rate(clki->clk));
5803 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
5810 err = ufshcd_vops_init(hba);
5814 err = ufshcd_vops_setup_regulators(hba, true);
5821 ufshcd_vops_exit(hba);
5824 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
5825 __func__, ufshcd_get_var_name(hba), err);
5829 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
5834 ufshcd_vops_setup_clocks(hba, false);
5836 ufshcd_vops_setup_regulators(hba, false);
5838 ufshcd_vops_exit(hba);
5841 static int ufshcd_hba_init(struct ufs_hba *hba)
5846 	 * Handle host controller power separately from the UFS device power
5847 	 * rails, as this makes it easier to control host controller power
5848 	 * collapse, which is different from UFS device power collapse.
5849 	 * Also, enable the host controller power before we go ahead with the
5850 	 * rest of the initialization here.
5852 err = ufshcd_init_hba_vreg(hba);
5856 err = ufshcd_setup_hba_vreg(hba, true);
5860 err = ufshcd_init_clocks(hba);
5862 goto out_disable_hba_vreg;
5864 err = ufshcd_setup_clocks(hba, true);
5866 goto out_disable_hba_vreg;
5868 err = ufshcd_init_vreg(hba);
5870 goto out_disable_clks;
5872 err = ufshcd_setup_vreg(hba, true);
5874 goto out_disable_clks;
5876 err = ufshcd_variant_hba_init(hba);
5878 goto out_disable_vreg;
5880 hba->is_powered = true;
5884 ufshcd_setup_vreg(hba, false);
5886 ufshcd_setup_clocks(hba, false);
5887 out_disable_hba_vreg:
5888 ufshcd_setup_hba_vreg(hba, false);
5893 static void ufshcd_hba_exit(struct ufs_hba *hba)
5895 if (hba->is_powered) {
5896 ufshcd_variant_hba_exit(hba);
5897 ufshcd_setup_vreg(hba, false);
5898 ufshcd_setup_clocks(hba, false);
5899 ufshcd_setup_hba_vreg(hba, false);
5900 hba->is_powered = false;
5905 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
5907 unsigned char cmd[6] = {REQUEST_SENSE,
5911 SCSI_SENSE_BUFFERSIZE,
5916 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5922 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
5923 SCSI_SENSE_BUFFERSIZE, NULL,
5924 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
5926 pr_err("%s: failed with err %d\n", __func__, ret);
5934 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
5936 * @hba: per adapter instance
5937 * @pwr_mode: device power mode to set
5939 * Returns 0 if requested power mode is set successfully
5940 * Returns non-zero if failed to set the requested power mode
5942 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
5943 enum ufs_dev_pwr_mode pwr_mode)
5945 unsigned char cmd[6] = { START_STOP };
5946 struct scsi_sense_hdr sshdr;
5947 struct scsi_device *sdp;
5948 unsigned long flags;
5951 spin_lock_irqsave(hba->host->host_lock, flags);
5952 sdp = hba->sdev_ufs_device;
5954 ret = scsi_device_get(sdp);
5955 if (!ret && !scsi_device_online(sdp)) {
5957 scsi_device_put(sdp);
5962 spin_unlock_irqrestore(hba->host->host_lock, flags);
5968 * If scsi commands fail, the scsi mid-layer schedules scsi error-
5969 * handling, which would wait for host to be resumed. Since we know
5970 * we are functional while we are here, skip host resume in error
5973 hba->host->eh_noresume = 1;
5974 if (hba->wlun_dev_clr_ua) {
5975 ret = ufshcd_send_request_sense(hba, sdp);
5978 /* Unit attention condition is cleared now */
5979 hba->wlun_dev_clr_ua = false;
5982 cmd[4] = pwr_mode << 4;
5985 	 * This function is generally called from the power management
5986 	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
5987 	 * already suspended children.
5989 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
5990 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
5992 sdev_printk(KERN_WARNING, sdp,
5993 "START_STOP failed for power mode: %d, result %x\n",
5995 if (driver_byte(ret) & DRIVER_SENSE)
5996 scsi_print_sense_hdr(sdp, NULL, &sshdr);
6000 hba->curr_dev_pwr_mode = pwr_mode;
6002 scsi_device_put(sdp);
6003 hba->host->eh_noresume = 0;
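/*
 * Editor's note on the CDB built above: START STOP UNIT is a 6-byte CDB
 * (opcode 0x1b) whose POWER CONDITION field occupies bits 7:4 of byte 4,
 * hence the "pwr_mode << 4". Assuming the usual ufs_dev_pwr_mode encoding
 * (UFS_ACTIVE_PWR_MODE = 1, UFS_SLEEP_PWR_MODE = 2,
 * UFS_POWERDOWN_PWR_MODE = 3 - an assumption to verify against ufs.h):
 *
 *   UFS_SLEEP_PWR_MODE     ->  cmd[4] = 0x20
 *   UFS_POWERDOWN_PWR_MODE ->  cmd[4] = 0x30
 */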
6007 static int ufshcd_link_state_transition(struct ufs_hba *hba,
6008 enum uic_link_state req_link_state,
6009 int check_for_bkops)
6013 if (req_link_state == hba->uic_link_state)
6016 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
6017 ret = ufshcd_uic_hibern8_enter(hba);
6019 ufshcd_set_link_hibern8(hba);
6024 * If autobkops is enabled, link can't be turned off because
6025 * turning off the link would also turn off the device.
6027 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
6028 (!check_for_bkops || (check_for_bkops &&
6029 !hba->auto_bkops_enabled))) {
6031 		 * Let's make sure that the link is in low power mode; we are doing
6032 		 * this currently by putting the link in Hibern8. Another way to
6033 		 * put the link in low power mode is to send a DME end point reset
6034 		 * to the device and then send the DME reset command to the local
6035 		 * UniPro. But putting the link in Hibern8 is much faster.
6037 ret = ufshcd_uic_hibern8_enter(hba);
6041 * Change controller state to "reset state" which
6042 * should also put the link in off/reset state
6044 ufshcd_hba_stop(hba, true);
6046 * TODO: Check if we need any delay to make sure that
6047 * controller is reset
6049 ufshcd_set_link_off(hba);
6056 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
6059 * It seems some UFS devices may keep drawing more than sleep current
6060 	 * (at least for 500 us) from UFS rails (especially from VCCQ rail).
6061 * To avoid this situation, add 2ms delay before putting these UFS
6062 * rails in LPM mode.
6064 if (!ufshcd_is_link_active(hba) &&
6065 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
6066 usleep_range(2000, 2100);
6069 	 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save some
6072 * If UFS device and link is in OFF state, all power supplies (VCC,
6073 * VCCQ, VCCQ2) can be turned off if power on write protect is not
6074 * required. If UFS link is inactive (Hibern8 or OFF state) and device
6075 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
6077 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
6078 * in low power state which would save some power.
6080 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
6081 !hba->dev_info.is_lu_power_on_wp) {
6082 ufshcd_setup_vreg(hba, false);
6083 } else if (!ufshcd_is_ufs_dev_active(hba)) {
6084 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
6085 if (!ufshcd_is_link_active(hba)) {
6086 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
6087 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
6092 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
6096 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
6097 !hba->dev_info.is_lu_power_on_wp) {
6098 ret = ufshcd_setup_vreg(hba, true);
6099 } else if (!ufshcd_is_ufs_dev_active(hba)) {
6100 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
6101 if (!ret && !ufshcd_is_link_active(hba)) {
6102 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6105 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6113 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
6115 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
6120 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
6122 if (ufshcd_is_link_off(hba))
6123 ufshcd_setup_hba_vreg(hba, false);
6126 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
6128 if (ufshcd_is_link_off(hba))
6129 ufshcd_setup_hba_vreg(hba, true);
6133 * ufshcd_suspend - helper function for suspend operations
6134 * @hba: per adapter instance
6135 * @pm_op: desired low power operation type
6137 * This function will try to put the UFS device and link into low power
6138 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
6139 * (System PM level).
6141 * If this function is called during shutdown, it will make sure that
6142  * both UFS device and UFS link are powered off.
6144  * NOTE: UFS device & link must be active before we enter this function.
6146 * Returns 0 for success and non-zero for failure
6148 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6151 enum ufs_pm_level pm_lvl;
6152 enum ufs_dev_pwr_mode req_dev_pwr_mode;
6153 enum uic_link_state req_link_state;
6155 hba->pm_op_in_progress = 1;
6156 if (!ufshcd_is_shutdown_pm(pm_op)) {
6157 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
6158 hba->rpm_lvl : hba->spm_lvl;
6159 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
6160 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
6162 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
6163 req_link_state = UIC_LINK_OFF_STATE;
6167 * If we can't transition into any of the low power modes
6168 * just gate the clocks.
6170 ufshcd_hold(hba, false);
6171 hba->clk_gating.is_suspended = true;
6173 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
6174 req_link_state == UIC_LINK_ACTIVE_STATE) {
6178 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
6179 (req_link_state == hba->uic_link_state))
6182 	/* UFS device & link must be active before we enter this function */
6183 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
6188 if (ufshcd_is_runtime_pm(pm_op)) {
6189 if (ufshcd_can_autobkops_during_suspend(hba)) {
6191 * The device is idle with no requests in the queue,
6192 * allow background operations if bkops status shows
6193 * that performance might be impacted.
6195 ret = ufshcd_urgent_bkops(hba);
6199 /* make sure that auto bkops is disabled */
6200 ufshcd_disable_auto_bkops(hba);
6204 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
6205 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
6206 !ufshcd_is_runtime_pm(pm_op))) {
6207 /* ensure that bkops is disabled */
6208 ufshcd_disable_auto_bkops(hba);
6209 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
6214 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
6216 goto set_dev_active;
6218 ufshcd_vreg_set_lpm(hba);
6222 	 * The clock scaling needs access to controller registers. Hence, wait
6223 * for pending clock scaling work to be done before clocks are
6226 if (ufshcd_is_clkscaling_enabled(hba)) {
6227 devfreq_suspend_device(hba->devfreq);
6228 hba->clk_scaling.window_start_t = 0;
6231 * Call vendor specific suspend callback. As these callbacks may access
6232 	 * vendor specific host controller register space, call them while the
6233 	 * host clocks are still ON.
6235 ret = ufshcd_vops_suspend(hba, pm_op);
6237 goto set_link_active;
6239 ret = ufshcd_vops_setup_clocks(hba, false);
6243 if (!ufshcd_is_link_active(hba))
6244 ufshcd_setup_clocks(hba, false);
6246 /* If link is active, device ref_clk can't be switched off */
6247 __ufshcd_setup_clocks(hba, false, true);
6249 hba->clk_gating.state = CLKS_OFF;
6251 	 * Disable the host irq, as there won't be any
6252 	 * host controller transactions expected till resume.
6254 ufshcd_disable_irq(hba);
6255 /* Put the host controller in low power mode if possible */
6256 ufshcd_hba_vreg_set_lpm(hba);
6260 ufshcd_vops_resume(hba, pm_op);
6262 ufshcd_vreg_set_hpm(hba);
6263 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
6264 ufshcd_set_link_active(hba);
6265 else if (ufshcd_is_link_off(hba))
6266 ufshcd_host_reset_and_restore(hba);
6268 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
6269 ufshcd_disable_auto_bkops(hba);
6271 hba->clk_gating.is_suspended = false;
6272 ufshcd_release(hba);
6274 hba->pm_op_in_progress = 0;
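/*
 * Editor's sketch of the rpm_lvl/spm_lvl mapping consumed above. In the
 * driver version this editor recalls, ufs_pm_lvl_states[] pairs each UFS PM
 * level with a device power mode and link state roughly as follows (treat
 * the exact table as an assumption; ufs_get_pm_lvl_to_dev_pwr_mode() and
 * ufs_get_pm_lvl_to_link_pwr_state() are the authoritative accessors):
 *
 *   lvl 0: UFS_ACTIVE_PWR_MODE    + UIC_LINK_ACTIVE_STATE
 *   lvl 1: UFS_ACTIVE_PWR_MODE    + UIC_LINK_HIBERN8_STATE
 *   lvl 2: UFS_SLEEP_PWR_MODE     + UIC_LINK_ACTIVE_STATE
 *   lvl 3: UFS_SLEEP_PWR_MODE     + UIC_LINK_HIBERN8_STATE
 *   lvl 4: UFS_POWERDOWN_PWR_MODE + UIC_LINK_HIBERN8_STATE
 *   lvl 5: UFS_POWERDOWN_PWR_MODE + UIC_LINK_OFF_STATE
 */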
6279 * ufshcd_resume - helper function for resume operations
6280 * @hba: per adapter instance
6281 * @pm_op: runtime PM or system PM
6283 * This function basically brings the UFS device, UniPro link and controller
6286 * Returns 0 for success and non-zero for failure
6288 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6291 enum uic_link_state old_link_state;
6293 hba->pm_op_in_progress = 1;
6294 old_link_state = hba->uic_link_state;
6296 ufshcd_hba_vreg_set_hpm(hba);
6297 /* Make sure clocks are enabled before accessing controller */
6298 ret = ufshcd_setup_clocks(hba, true);
6302 /* enable the host irq as host controller would be active soon */
6303 ret = ufshcd_enable_irq(hba);
6305 goto disable_irq_and_vops_clks;
6307 ret = ufshcd_vreg_set_hpm(hba);
6309 goto disable_irq_and_vops_clks;
6312 * Call vendor specific resume callback. As these callbacks may access
6313 	 * vendor specific host controller register space, call them when the
6314 * host clocks are ON.
6316 ret = ufshcd_vops_resume(hba, pm_op);
6320 if (ufshcd_is_link_hibern8(hba)) {
6321 ret = ufshcd_uic_hibern8_exit(hba);
6323 ufshcd_set_link_active(hba);
6325 goto vendor_suspend;
6326 } else if (ufshcd_is_link_off(hba)) {
6327 ret = ufshcd_host_reset_and_restore(hba);
6329 * ufshcd_host_reset_and_restore() should have already
6330 * set the link state as active
6332 if (ret || !ufshcd_is_link_active(hba))
6333 goto vendor_suspend;
6336 if (!ufshcd_is_ufs_dev_active(hba)) {
6337 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6339 goto set_old_link_state;
6342 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
6343 ufshcd_enable_auto_bkops(hba);
6346 * If BKOPs operations are urgently needed at this moment then
6347 * keep auto-bkops enabled or else disable it.
6349 ufshcd_urgent_bkops(hba);
6351 hba->clk_gating.is_suspended = false;
6353 if (ufshcd_is_clkscaling_enabled(hba))
6354 devfreq_resume_device(hba->devfreq);
6356 /* Schedule clock gating in case of no access to UFS device yet */
6357 ufshcd_release(hba);
6361 ufshcd_link_state_transition(hba, old_link_state, 0);
6363 ufshcd_vops_suspend(hba, pm_op);
6365 ufshcd_vreg_set_lpm(hba);
6366 disable_irq_and_vops_clks:
6367 ufshcd_disable_irq(hba);
6368 ufshcd_setup_clocks(hba, false);
6370 hba->pm_op_in_progress = 0;
6375 * ufshcd_system_suspend - system suspend routine
6376 * @hba: per adapter instance
6377 * @pm_op: runtime PM or system PM
6379 * Check the description of ufshcd_suspend() function for more details.
6381 * Returns 0 for success and non-zero for failure
6383 int ufshcd_system_suspend(struct ufs_hba *hba)
6387 if (!hba || !hba->is_powered)
6390 if (pm_runtime_suspended(hba->dev)) {
6391 if (hba->rpm_lvl == hba->spm_lvl)
6393 			 * There is a possibility that the device may still be in
6394 			 * active state during the runtime suspend.
6396 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6397 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
6401 		 * UFS device and/or UFS link low power states during runtime
6402 		 * suspend seem to be different from what is expected during
6403 		 * system suspend. Hence runtime resume the device & link and
6404 		 * let the system suspend low power states take effect.
6405 		 * TODO: If resume takes too long, we might have to optimize
6406 		 * it in the future by not resuming everything if possible.
6408 ret = ufshcd_runtime_resume(hba);
6413 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
6416 hba->is_sys_suspended = true;
6419 EXPORT_SYMBOL(ufshcd_system_suspend);
6422 * ufshcd_system_resume - system resume routine
6423 * @hba: per adapter instance
6425 * Returns 0 for success and non-zero for failure
6428 int ufshcd_system_resume(struct ufs_hba *hba)
6433 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
6435 * Let the runtime resume take care of resuming
6436 * if runtime suspended.
6440 return ufshcd_resume(hba, UFS_SYSTEM_PM);
6442 EXPORT_SYMBOL(ufshcd_system_resume);
6445 * ufshcd_runtime_suspend - runtime suspend routine
6446 * @hba: per adapter instance
6448 * Check the description of ufshcd_suspend() function for more details.
6450 * Returns 0 for success and non-zero for failure
6452 int ufshcd_runtime_suspend(struct ufs_hba *hba)
6457 if (!hba->is_powered)
6460 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
6462 EXPORT_SYMBOL(ufshcd_runtime_suspend);
6465 * ufshcd_runtime_resume - runtime resume routine
6466 * @hba: per adapter instance
6468 * This function basically brings the UFS device, UniPro link and controller
6469 * to active state. Following operations are done in this function:
6471 * 1. Turn on all the controller related clocks
6472 * 2. Bring the UniPro link out of Hibernate state
6473 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
6475 * 4. If auto-bkops is enabled on the device, disable it.
6477  * So the following would be the possible power state after this function returns
6479 * S1: UFS device in Active state with VCC rail ON
6480 * UniPro link in Active state
6481 * All the UFS/UniPro controller clocks are ON
6483 * Returns 0 for success and non-zero for failure
6485 int ufshcd_runtime_resume(struct ufs_hba *hba)
6490 if (!hba->is_powered)
6493 return ufshcd_resume(hba, UFS_RUNTIME_PM);
6495 EXPORT_SYMBOL(ufshcd_runtime_resume);
6497 int ufshcd_runtime_idle(struct ufs_hba *hba)
6501 EXPORT_SYMBOL(ufshcd_runtime_idle);
6504 * ufshcd_shutdown - shutdown routine
6505 * @hba: per adapter instance
6507 * This function would power off both UFS device and UFS link.
6509 * Returns 0 always to allow force shutdown even in case of errors.
6511 int ufshcd_shutdown(struct ufs_hba *hba)
6515 if (!hba->is_powered)
6518 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
6521 pm_runtime_get_sync(hba->dev);
6523 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
6526 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
6527 /* allow force shutdown even in case of errors */
6530 EXPORT_SYMBOL(ufshcd_shutdown);
6533  * ufshcd_remove - de-allocate SCSI host and host memory space
6534  * data structures
6535  * @hba: per adapter instance
6537 void ufshcd_remove(struct ufs_hba *hba)
6539 scsi_remove_host(hba->host);
6540 /* disable interrupts */
6541 ufshcd_disable_intr(hba, hba->intr_mask);
6542 ufshcd_hba_stop(hba, true);
6544 ufshcd_exit_clk_gating(hba);
6545 if (ufshcd_is_clkscaling_enabled(hba))
6546 devfreq_remove_device(hba->devfreq);
6547 ufshcd_hba_exit(hba);
6549 EXPORT_SYMBOL_GPL(ufshcd_remove);
6552 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
6553 * @hba: pointer to Host Bus Adapter (HBA)
6555 void ufshcd_dealloc_host(struct ufs_hba *hba)
6557 scsi_host_put(hba->host);
6559 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
6562 * ufshcd_set_dma_mask - Set dma mask based on the controller
6563 * addressing capability
6564 * @hba: per adapter instance
6566 * Returns 0 for success, non-zero for failure
6568 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
6570 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
6571 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
6574 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
6578 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
6579 * @dev: pointer to device handle
6580 * @hba_handle: driver private handle
6581 * Returns 0 on success, non-zero value on failure
6583 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
6585 struct Scsi_Host *host;
6586 struct ufs_hba *hba;
6591 "Invalid memory reference for dev is NULL\n");
6596 host = scsi_host_alloc(&ufshcd_driver_template,
6597 sizeof(struct ufs_hba));
6599 dev_err(dev, "scsi_host_alloc failed\n");
6603 hba = shost_priv(host);
6611 EXPORT_SYMBOL(ufshcd_alloc_host);
6613 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
6616 struct ufs_clk_info *clki;
6617 struct list_head *head = &hba->clk_list_head;
6619 if (!head || list_empty(head))
6622 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
6626 list_for_each_entry(clki, head, list) {
6627 if (!IS_ERR_OR_NULL(clki->clk)) {
6628 if (scale_up && clki->max_freq) {
6629 if (clki->curr_freq == clki->max_freq)
6631 ret = clk_set_rate(clki->clk, clki->max_freq);
6633 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6634 __func__, clki->name,
6635 clki->max_freq, ret);
6638 clki->curr_freq = clki->max_freq;
6640 } else if (!scale_up && clki->min_freq) {
6641 if (clki->curr_freq == clki->min_freq)
6643 ret = clk_set_rate(clki->clk, clki->min_freq);
6645 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6646 __func__, clki->name,
6647 clki->min_freq, ret);
6650 clki->curr_freq = clki->min_freq;
6653 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
6654 clki->name, clk_get_rate(clki->clk));
6657 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
6663 static int ufshcd_devfreq_target(struct device *dev,
6664 unsigned long *freq, u32 flags)
6667 struct ufs_hba *hba = dev_get_drvdata(dev);
6668 bool release_clk_hold = false;
6669 unsigned long irq_flags;
6671 if (!ufshcd_is_clkscaling_enabled(hba))
6674 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6675 if (ufshcd_eh_in_progress(hba)) {
6676 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6680 if (ufshcd_is_clkgating_allowed(hba) &&
6681 (hba->clk_gating.state != CLKS_ON)) {
6682 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
6683 /* hold the vote until the scaling work is completed */
6684 hba->clk_gating.active_reqs++;
6685 release_clk_hold = true;
6686 hba->clk_gating.state = CLKS_ON;
6689 * Clock gating work seems to be running in parallel
6690 * hence skip scaling work to avoid deadlock between
6691 * current scaling work and gating work.
6693 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6697 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6699 if (*freq == UINT_MAX)
6700 err = ufshcd_scale_clks(hba, true);
6701 else if (*freq == 0)
6702 err = ufshcd_scale_clks(hba, false);
6704 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6705 if (release_clk_hold)
6706 __ufshcd_release(hba);
6707 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6712 static int ufshcd_devfreq_get_dev_status(struct device *dev,
6713 struct devfreq_dev_status *stat)
6715 struct ufs_hba *hba = dev_get_drvdata(dev);
6716 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
6717 unsigned long flags;
6719 if (!ufshcd_is_clkscaling_enabled(hba))
6722 memset(stat, 0, sizeof(*stat));
6724 spin_lock_irqsave(hba->host->host_lock, flags);
6725 if (!scaling->window_start_t)
6728 if (scaling->is_busy_started)
6729 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
6730 scaling->busy_start_t));
6732 stat->total_time = jiffies_to_usecs((long)jiffies -
6733 (long)scaling->window_start_t);
6734 stat->busy_time = scaling->tot_busy_t;
6736 scaling->window_start_t = jiffies;
6737 scaling->tot_busy_t = 0;
6739 if (hba->outstanding_reqs) {
6740 scaling->busy_start_t = ktime_get();
6741 scaling->is_busy_started = true;
6743 scaling->busy_start_t = ktime_set(0, 0);
6744 scaling->is_busy_started = false;
6746 spin_unlock_irqrestore(hba->host->host_lock, flags);
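/*
 * Editor's note: devfreq's simple_ondemand governor consumes the stats
 * filled in above as a load ratio (busy_time / total_time) per polling
 * window and raises or lowers the requested frequency accordingly. A
 * minimal sketch of that load computation (ufs_window_load_pct() is a
 * hypothetical helper, not part of the devfreq API):
 */
static inline unsigned int ufs_window_load_pct(struct devfreq_dev_status *stat)
{
	if (!stat->total_time)
		return 0;

	/* percentage of the window spent with requests outstanding */
	return stat->busy_time * 100 / stat->total_time;
}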
6750 static struct devfreq_dev_profile ufs_devfreq_profile = {
6752 .target = ufshcd_devfreq_target,
6753 .get_dev_status = ufshcd_devfreq_get_dev_status,
6757 * ufshcd_init - Driver initialization routine
6758 * @hba: per-adapter instance
6759 * @mmio_base: base register address
6760 * @irq: Interrupt line of device
6761 * Returns 0 on success, non-zero value on failure
6763 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6766 struct Scsi_Host *host = hba->host;
6767 struct device *dev = hba->dev;
6771 "Invalid memory reference for mmio_base is NULL\n");
6776 hba->mmio_base = mmio_base;
6779 /* Set descriptor lengths to specification defaults */
6780 ufshcd_def_desc_sizes(hba);
6782 err = ufshcd_hba_init(hba);
6786 /* Read capabilities registers */
6787 ufshcd_hba_capabilities(hba);
6789 /* Get UFS version supported by the controller */
6790 hba->ufs_version = ufshcd_get_ufs_version(hba);
6792 /* Get Interrupt bit mask per version */
6793 hba->intr_mask = ufshcd_get_intr_mask(hba);
6795 err = ufshcd_set_dma_mask(hba);
6797 dev_err(hba->dev, "set dma mask failed\n");
6801 /* Allocate memory for host memory space */
6802 err = ufshcd_memory_alloc(hba);
6804 dev_err(hba->dev, "Memory allocation failed\n");
6809 ufshcd_host_memory_configure(hba);
6811 host->can_queue = hba->nutrs;
6812 host->cmd_per_lun = hba->nutrs;
6813 host->max_id = UFSHCD_MAX_ID;
6814 host->max_lun = UFS_MAX_LUNS;
6815 host->max_channel = UFSHCD_MAX_CHANNEL;
6816 host->unique_id = host->host_no;
6817 host->max_cmd_len = MAX_CDB_SIZE;
6819 hba->max_pwr_info.is_valid = false;
6821 /* Initailize wait queue for task management */
6822 init_waitqueue_head(&hba->tm_wq);
6823 init_waitqueue_head(&hba->tm_tag_wq);
6825 /* Initialize work queues */
6826 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
6827 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
6829 /* Initialize UIC command mutex */
6830 mutex_init(&hba->uic_cmd_mutex);
6832 /* Initialize mutex for device management commands */
6833 mutex_init(&hba->dev_cmd.lock);
6835 /* Initialize device management tag acquire wait queue */
6836 init_waitqueue_head(&hba->dev_cmd.tag_wq);
6838 ufshcd_init_clk_gating(hba);
6841 * In order to avoid any spurious interrupt immediately after
6842 * registering UFS controller interrupt handler, clear any pending UFS
6843 * interrupt status and disable all the UFS interrupts.
6845 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
6846 REG_INTERRUPT_STATUS);
6847 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
6849 * Make sure that UFS interrupts are disabled and any pending interrupt
6850 * status is cleared before registering UFS interrupt handler.
6854 /* IRQ registration */
6855 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
6857 dev_err(hba->dev, "request irq failed\n");
6860 hba->is_irq_enabled = true;
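
	/*
	 * The IRQ is requested with IRQF_SHARED above because the line may
	 * be shared with other devices (e.g. on PCI hosts); ufshcd_intr()
	 * checks REG_INTERRUPT_STATUS and returns IRQ_NONE for interrupts
	 * raised by another device on the same line.
	 */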
	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						   "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
					PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * We assume the device was not put into a sleep/power-down state
	 * by the boot loader before the kernel started; this avoids doing
	 * link startup twice during ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
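
/*
 * A minimal sketch of how bus glue is expected to use ufshcd_init(),
 * loosely modeled on ufshcd-pltfrm/ufshcd-pci; "pdev" and "res" below
 * are illustrative locals, not part of this file:
 *
 *	struct ufs_hba *hba;
 *	struct resource *res;
 *	void __iomem *mmio_base;
 *	int err;
 *
 *	err = ufshcd_alloc_host(&pdev->dev, &hba);
 *	if (err)
 *		return err;
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	mmio_base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(mmio_base))
 *		return PTR_ERR(mmio_base);
 *	err = ufshcd_init(hba, mmio_base, platform_get_irq(pdev, 0));
 *	if (err)
 *		ufshcd_dealloc_host(hba);
 *	return err;
 */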
6917 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
6918 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
6919 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
6920 MODULE_LICENSE("GPL");
6921 MODULE_VERSION(UFSHCD_DRIVER_VERSION);