GNU Linux-libre 4.14.257-gnu1
drivers/scsi/ufs/ufshcd.c
1 /*
2  * Universal Flash Storage Host controller driver Core
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2013 Samsung India Software Operations
6  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7  *
8  * Authors:
9  *      Santosh Yaraganavi <santosh.sy@samsung.com>
10  *      Vinayak Holikatti <h.vinayak@samsung.com>
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License
14  * as published by the Free Software Foundation; either version 2
15  * of the License, or (at your option) any later version.
16  * See the COPYING file in the top-level directory or visit
17  * <http://www.gnu.org/licenses/gpl-2.0.html>
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * This program is provided "AS IS" and "WITH ALL FAULTS" and
25  * without warranty of any kind. You are solely responsible for
26  * determining the appropriateness of using and distributing
27  * the program and assume all risks associated with your exercise
28  * of rights with respect to the program, including but not limited
29  * to infringement of third party rights, the risks and costs of
30  * program errors, damage to or loss of data, programs or equipment,
31  * and unavailability or interruption of operations. Under no
32  * circumstances will the contributor of this Program be liable for
33  * any damages of any kind arising from your use or distribution of
34  * this program.
35  *
36  * The Linux Foundation chooses to take subject only to the GPLv2
37  * license terms, and distributes only under these terms.
38  */
39
40 #include <linux/async.h>
41 #include <linux/devfreq.h>
42 #include <linux/nls.h>
43 #include <linux/of.h>
44 #include "ufshcd.h"
45 #include "ufs_quirks.h"
46 #include "unipro.h"
47
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/ufs.h>
50
51 #define UFSHCD_REQ_SENSE_SIZE   18
52
53 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
54                                  UTP_TASK_REQ_COMPL |\
55                                  UFSHCD_ERROR_MASK)
56 /* UIC command timeout, unit: ms */
57 #define UIC_CMD_TIMEOUT 500
58
59 /* NOP OUT retries waiting for NOP IN response */
60 #define NOP_OUT_RETRIES    10
61 /* Timeout after 30 msecs if NOP OUT hangs without response */
62 #define NOP_OUT_TIMEOUT    30 /* msecs */
63
64 /* Query request retries */
65 #define QUERY_REQ_RETRIES 3
66 /* Query request timeout */
67 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
68
69 /* Task management command timeout */
70 #define TM_CMD_TIMEOUT  100 /* msecs */
71
72 /* maximum number of retries for a general UIC command  */
73 #define UFS_UIC_COMMAND_RETRIES 3
74
75 /* maximum number of link-startup retries */
76 #define DME_LINKSTARTUP_RETRIES 3
77
78 /* Maximum retries for Hibern8 enter */
79 #define UIC_HIBERN8_ENTER_RETRIES 3
80
81 /* maximum number of reset retries before giving up */
82 #define MAX_HOST_RESET_RETRIES 5
83
84 /* Expose the flag value from utp_upiu_query.value */
85 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
86
87 /* Interrupt aggregation default timeout, unit: 40us */
88 #define INT_AGGR_DEF_TO 0x02
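/* i.e. a default interrupt aggregation timeout of 2 * 40us = 80us */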
89
90 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
91         ({                                                              \
92                 int _ret;                                               \
93                 if (_on)                                                \
94                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
95                 else                                                    \
96                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
97                 _ret;                                                   \
98         })
99
100 #define ufshcd_hex_dump(prefix_str, buf, len) \
101 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
102
103 enum {
104         UFSHCD_MAX_CHANNEL      = 0,
105         UFSHCD_MAX_ID           = 1,
106         UFSHCD_CMD_PER_LUN      = 32,
107         UFSHCD_CAN_QUEUE        = 32,
108 };
109
110 /* UFSHCD states */
111 enum {
112         UFSHCD_STATE_RESET,
113         UFSHCD_STATE_ERROR,
114         UFSHCD_STATE_OPERATIONAL,
115         UFSHCD_STATE_EH_SCHEDULED,
116 };
117
118 /* UFSHCD error handling flags */
119 enum {
120         UFSHCD_EH_IN_PROGRESS = (1 << 0),
121 };
122
123 /* UFSHCD UIC layer error flags */
124 enum {
125         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
126         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
127         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
128         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
129         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
130         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
131 };
132
133 #define ufshcd_set_eh_in_progress(h) \
134         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
135 #define ufshcd_eh_in_progress(h) \
136         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
137 #define ufshcd_clear_eh_in_progress(h) \
138         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
139
140 #define ufshcd_set_ufs_dev_active(h) \
141         ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
142 #define ufshcd_set_ufs_dev_sleep(h) \
143         ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
144 #define ufshcd_set_ufs_dev_poweroff(h) \
145         ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
146 #define ufshcd_is_ufs_dev_active(h) \
147         ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
148 #define ufshcd_is_ufs_dev_sleep(h) \
149         ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
150 #define ufshcd_is_ufs_dev_poweroff(h) \
151         ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
152
153 static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
154         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
155         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
156         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
157         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
158         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
159         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
160 };
161
162 static inline enum ufs_dev_pwr_mode
163 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
164 {
165         return ufs_pm_lvl_states[lvl].dev_state;
166 }
167
168 static inline enum uic_link_state
169 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
170 {
171         return ufs_pm_lvl_states[lvl].link_state;
172 }
173
174 static inline enum ufs_pm_level
175 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
176                                         enum uic_link_state link_state)
177 {
178         enum ufs_pm_level lvl;
179
180         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
181                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
182                         (ufs_pm_lvl_states[lvl].link_state == link_state))
183                         return lvl;
184         }
185
186         /* if no match found, return the level 0 */
187         return UFS_PM_LVL_0;
188 }
189
190 static struct ufs_dev_fix ufs_fixups[] = {
191         /* UFS cards deviations table */
192         UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
193                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
194         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
195                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
196         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
197         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
198                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
199         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
200                 UFS_DEVICE_NO_FASTAUTO),
201         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
202                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
203         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
204                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
205         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
206                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
207         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
208                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
209         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
210         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
211                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
212
213         END_FIX
214 };
215
216 static void ufshcd_tmc_handler(struct ufs_hba *hba);
217 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
218 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
219 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
220 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
221 static void ufshcd_hba_exit(struct ufs_hba *hba);
222 static int ufshcd_probe_hba(struct ufs_hba *hba);
223 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
224                                  bool skip_ref_clk);
225 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
226 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
227 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
228 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
229 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
230 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
231 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
232 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
233 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
234 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
235 static irqreturn_t ufshcd_intr(int irq, void *__hba);
236 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
237                 struct ufs_pa_layer_attr *desired_pwr_mode);
238 static int ufshcd_change_power_mode(struct ufs_hba *hba,
239                              struct ufs_pa_layer_attr *pwr_mode);
240 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
241 {
242         return tag >= 0 && tag < hba->nutrs;
243 }
244
245 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
246 {
247         int ret = 0;
248
249         if (!hba->is_irq_enabled) {
250                 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
251                                 hba);
252                 if (ret)
253                         dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
254                                 __func__, ret);
255                 hba->is_irq_enabled = true;
256         }
257
258         return ret;
259 }
260
261 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
262 {
263         if (hba->is_irq_enabled) {
264                 free_irq(hba->irq, hba);
265                 hba->is_irq_enabled = false;
266         }
267 }
268
269 /* replace non-printable or non-ASCII characters with spaces */
270 static inline void ufshcd_remove_non_printable(char *val)
271 {
272         if (!val)
273                 return;
274
275         if (*val < 0x20 || *val > 0x7e)
276                 *val = ' ';
277 }
278
279 static void ufshcd_add_command_trace(struct ufs_hba *hba,
280                 unsigned int tag, const char *str)
281 {
282         sector_t lba = -1;
283         u8 opcode = 0;
284         u32 intr, doorbell;
285         struct ufshcd_lrb *lrbp;
286         int transfer_len = -1;
287
288         if (!trace_ufshcd_command_enabled())
289                 return;
290
291         lrbp = &hba->lrb[tag];
292
293         if (lrbp->cmd) { /* data phase exists */
294                 opcode = (u8)(*lrbp->cmd->cmnd);
295                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
296                         /*
297                          * Currently we only fully trace read(10) and write(10)
298                          * commands
299                          */
300                         if (lrbp->cmd->request && lrbp->cmd->request->bio)
301                                 lba =
302                                   lrbp->cmd->request->bio->bi_iter.bi_sector;
303                         transfer_len = be32_to_cpu(
304                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
305                 }
306         }
307
308         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
309         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
310         trace_ufshcd_command(dev_name(hba->dev), str, tag,
311                                 doorbell, transfer_len, intr, lba, opcode);
312 }
313
314 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
315 {
316         struct ufs_clk_info *clki;
317         struct list_head *head = &hba->clk_list_head;
318
319         if (list_empty(head))
320                 return;
321
322         list_for_each_entry(clki, head, list) {
323                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
324                                 clki->max_freq)
325                         dev_err(hba->dev, "clk: %s, rate: %u\n",
326                                         clki->name, clki->curr_freq);
327         }
328 }
329
330 static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
331                 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
332 {
333         int i;
334
335         for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
336                 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
337
338                 if (err_hist->reg[p] == 0)
339                         continue;
340                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
341                         err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
342         }
343 }
344
345 static void ufshcd_print_host_regs(struct ufs_hba *hba)
346 {
347         /*
348          * hex_dump reads its data without the readl macro. This might
349          * cause inconsistency issues on some platforms, as the printed
350          * values may be from cache and not the most recent value.
351          * To know whether you are looking at an un-cached version, verify
352          * that the IORESOURCE_MEM flag is on when xxx_get_resource() is
353          * invoked during the platform/pci probe function.
354          */
355         ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
356         dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
357                 hba->ufs_version, hba->capabilities);
358         dev_err(hba->dev,
359                 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
360                 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
361         dev_err(hba->dev,
362                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
363                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
364                 hba->ufs_stats.hibern8_exit_cnt);
365
366         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
367         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
368         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
369         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
370         ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
371
372         ufshcd_print_clk_freqs(hba);
373
374         if (hba->vops && hba->vops->dbg_register_dump)
375                 hba->vops->dbg_register_dump(hba);
376 }
377
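/*
 * ufshcd_print_trs - dump, for every tag set in @bitmap, the transfer
 * request descriptor, the request and response UPIUs and, when @pr_prdt
 * is true, the PRDT entries.
 */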
378 static
379 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
380 {
381         struct ufshcd_lrb *lrbp;
382         int prdt_length;
383         int tag;
384
385         for_each_set_bit(tag, &bitmap, hba->nutrs) {
386                 lrbp = &hba->lrb[tag];
387
388                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
389                                 tag, ktime_to_us(lrbp->issue_time_stamp));
390                 dev_err(hba->dev,
391                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
392                         tag, (u64)lrbp->utrd_dma_addr);
393
394                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
395                                 sizeof(struct utp_transfer_req_desc));
396                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
397                         (u64)lrbp->ucd_req_dma_addr);
398                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
399                                 sizeof(struct utp_upiu_req));
400                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
401                         (u64)lrbp->ucd_rsp_dma_addr);
402                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
403                                 sizeof(struct utp_upiu_rsp));
404
405                 prdt_length = le16_to_cpu(
406                         lrbp->utr_descriptor_ptr->prd_table_length);
407                 dev_err(hba->dev,
408                         "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
409                         tag, prdt_length,
410                         (u64)lrbp->ucd_prdt_dma_addr);
411
412                 if (pr_prdt)
413                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
414                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
415         }
416 }
417
418 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
419 {
420         struct utp_task_req_desc *tmrdp;
421         int tag;
422
423         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
424                 tmrdp = &hba->utmrdl_base_addr[tag];
425                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
426                 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
427                                 sizeof(struct request_desc_header));
428                 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
429                                 tag);
430                 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
431                                 sizeof(struct utp_upiu_req));
432                 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
433                                 tag);
434                 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
435                                 sizeof(struct utp_task_req_desc));
436         }
437 }
438
439 static void ufshcd_print_host_state(struct ufs_hba *hba)
440 {
441         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
442         dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
443                 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
444         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
445                 hba->saved_err, hba->saved_uic_err);
446         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
447                 hba->curr_dev_pwr_mode, hba->uic_link_state);
448         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
449                 hba->pm_op_in_progress, hba->is_sys_suspended);
450         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
451                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
452         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
453         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
454                 hba->eh_flags, hba->req_abort_count);
455         dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
456                 hba->capabilities, hba->caps);
457         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
458                 hba->dev_quirks);
459 }
460
461 /**
462  * ufshcd_print_pwr_info - print power params as saved in hba
463  * power info
464  * @hba: per-adapter instance
465  */
466 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
467 {
468         static const char * const names[] = {
469                 "INVALID MODE",
470                 "FAST MODE",
471                 "SLOW_MODE",
472                 "INVALID MODE",
473                 "FASTAUTO_MODE",
474                 "SLOWAUTO_MODE",
475                 "INVALID MODE",
476         };
477
478         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
479                  __func__,
480                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
481                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
482                  names[hba->pwr_info.pwr_rx],
483                  names[hba->pwr_info.pwr_tx],
484                  hba->pwr_info.hs_rate);
485 }
486
487 /*
488  * ufshcd_wait_for_register - wait for register value to change
489  * @hba - per-adapter interface
490  * @reg - mmio register offset
491  * @mask - mask to apply to read register value
492  * @val - wait condition
493  * @interval_us - polling interval in microsecs
494  * @timeout_ms - timeout in millisecs
495  * @can_sleep - perform sleep or just spin
496  *
497  * Returns -ETIMEDOUT on error, zero on success
498  */
499 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
500                                 u32 val, unsigned long interval_us,
501                                 unsigned long timeout_ms, bool can_sleep)
502 {
503         int err = 0;
504         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
505
506         /* ignore bits that we don't intend to wait on */
507         val = val & mask;
508
509         while ((ufshcd_readl(hba, reg) & mask) != val) {
510                 if (can_sleep)
511                         usleep_range(interval_us, interval_us + 50);
512                 else
513                         udelay(interval_us);
514                 if (time_after(jiffies, timeout)) {
515                         if ((ufshcd_readl(hba, reg) & mask) != val)
516                                 err = -ETIMEDOUT;
517                         break;
518                 }
519         }
520
521         return err;
522 }
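/*
 * Illustrative use only (not part of the original source): waiting for a
 * single transfer request doorbell bit to clear, sleeping between polls,
 * might look like:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 1000, 1000, true);
 */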
523
524 /**
525  * ufshcd_get_intr_mask - Get the interrupt bit mask
526  * @hba - Pointer to adapter instance
527  *
528  * Returns interrupt bit mask per version
529  */
530 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
531 {
532         u32 intr_mask = 0;
533
534         switch (hba->ufs_version) {
535         case UFSHCI_VERSION_10:
536                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
537                 break;
538         case UFSHCI_VERSION_11:
539         case UFSHCI_VERSION_20:
540                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
541                 break;
542         case UFSHCI_VERSION_21:
543         default:
544                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
545                 break;
546         }
547
548         return intr_mask;
549 }
550
551 /**
552  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
553  * @hba - Pointer to adapter instance
554  *
555  * Returns UFSHCI version supported by the controller
556  */
557 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
558 {
559         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
560                 return ufshcd_vops_get_ufs_hci_version(hba);
561
562         return ufshcd_readl(hba, REG_UFS_VERSION);
563 }
564
565 /**
566  * ufshcd_is_device_present - Check if any device is connected to
567  *                            the host controller
568  * @hba: pointer to adapter instance
569  *
570  * Returns true if device present, false if no device detected
571  */
572 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
573 {
574         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
575                                                 DEVICE_PRESENT) ? true : false;
576 }
577
578 /**
579  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
580  * @lrb: pointer to local command reference block
581  *
582  * This function is used to get the OCS field from UTRD
583  * Returns the OCS field in the UTRD
584  */
585 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
586 {
587         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
588 }
589
590 /**
591  * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
592  * @task_req_descp: pointer to utp_task_req_desc structure
593  *
594  * This function is used to get the OCS field from UTMRD
595  * Returns the OCS field in the UTMRD
596  */
597 static inline int
598 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
599 {
600         return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
601 }
602
603 /**
604  * ufshcd_get_tm_free_slot - get a free slot for task management request
605  * @hba: per adapter instance
606  * @free_slot: pointer to variable with available slot value
607  *
608  * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
609  * Returns false if no free slot is available, else returns true with the
610  * tag value in @free_slot.
611  */
612 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
613 {
614         int tag;
615         bool ret = false;
616
617         if (!free_slot)
618                 goto out;
619
620         do {
621                 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
622                 if (tag >= hba->nutmrs)
623                         goto out;
624         } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
625
626         *free_slot = tag;
627         ret = true;
628 out:
629         return ret;
630 }
631
632 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
633 {
634         clear_bit_unlock(slot, &hba->tm_slots_in_use);
635 }
636
637 /**
638  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
639  * @hba: per adapter instance
640  * @pos: position of the bit to be cleared
641  */
642 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
643 {
644         ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
645 }
646
647 /**
648  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
649  * @hba: per adapter instance
650  * @tag: position of the bit to be cleared
651  */
652 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
653 {
654         __clear_bit(tag, &hba->outstanding_reqs);
655 }
656
657 /**
658  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
659  * @reg: Register value of host controller status
660  *
661  * Returns 0 on success and a positive value on failure
662  */
663 static inline int ufshcd_get_lists_status(u32 reg)
664 {
665         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
666 }
667
668 /**
669  * ufshcd_get_uic_cmd_result - Get the UIC command result
670  * @hba: Pointer to adapter instance
671  *
672  * This function gets the result of UIC command completion
673  * Returns 0 on success, non zero value on error
674  */
675 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
676 {
677         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
678                MASK_UIC_COMMAND_RESULT;
679 }
680
681 /**
682  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
683  * @hba: Pointer to adapter instance
684  *
685  * This function gets UIC command argument3
686  * Returns the value of UIC command argument3
687  */
688 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
689 {
690         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
691 }
692
693 /**
694  * ufshcd_get_req_rsp - returns the TR response transaction type
695  * @ucd_rsp_ptr: pointer to response UPIU
696  */
697 static inline int
698 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
699 {
700         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
701 }
702
703 /**
704  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
705  * @ucd_rsp_ptr: pointer to response UPIU
706  *
707  * This function gets the response status and scsi_status from response UPIU
708  * Returns the response result code.
709  */
710 static inline int
711 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
712 {
713         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
714 }
715
716 /*
717  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
718  *                              from response UPIU
719  * @ucd_rsp_ptr: pointer to response UPIU
720  *
721  * Return the data segment length.
722  */
723 static inline unsigned int
724 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
725 {
726         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
727                 MASK_RSP_UPIU_DATA_SEG_LEN;
728 }
729
730 /**
731  * ufshcd_is_exception_event - Check if the device raised an exception event
732  * @ucd_rsp_ptr: pointer to response UPIU
733  *
734  * The function checks if the device raised an exception event indicated in
735  * the Device Information field of response UPIU.
736  *
737  * Returns true if exception is raised, false otherwise.
738  */
739 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
740 {
741         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
742                         MASK_RSP_EXCEPTION_EVENT ? true : false;
743 }
744
745 /**
746  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
747  * @hba: per adapter instance
748  */
749 static inline void
750 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
751 {
752         ufshcd_writel(hba, INT_AGGR_ENABLE |
753                       INT_AGGR_COUNTER_AND_TIMER_RESET,
754                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
755 }
756
757 /**
758  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
759  * @hba: per adapter instance
760  * @cnt: Interrupt aggregation counter threshold
761  * @tmout: Interrupt aggregation timeout value
762  */
763 static inline void
764 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
765 {
766         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
767                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
768                       INT_AGGR_TIMEOUT_VAL(tmout),
769                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
770 }
771
772 /**
773  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
774  * @hba: per adapter instance
775  */
776 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
777 {
778         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
779 }
780
781 /**
782  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
783  *                      When run-stop registers are set to 1, it indicates to the
784  *                      host controller that it can process the requests
785  * @hba: per adapter instance
786  */
787 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
788 {
789         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
790                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
791         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
792                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
793 }
794
795 /**
796  * ufshcd_hba_start - Start controller initialization sequence
797  * @hba: per adapter instance
798  */
799 static inline void ufshcd_hba_start(struct ufs_hba *hba)
800 {
801         ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
802 }
803
804 /**
805  * ufshcd_is_hba_active - Get controller state
806  * @hba: per adapter instance
807  *
808  * Returns false if controller is active, true otherwise
809  */
810 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
811 {
812         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
813                 ? false : true;
814 }
815
816 static const char *ufschd_uic_link_state_to_string(
817                         enum uic_link_state state)
818 {
819         switch (state) {
820         case UIC_LINK_OFF_STATE:        return "OFF";
821         case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
822         case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
823         default:                        return "UNKNOWN";
824         }
825 }
826
827 static const char *ufschd_ufs_dev_pwr_mode_to_string(
828                         enum ufs_dev_pwr_mode state)
829 {
830         switch (state) {
831         case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
832         case UFS_SLEEP_PWR_MODE:        return "SLEEP";
833         case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
834         default:                        return "UNKNOWN";
835         }
836 }
837
838 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
839 {
840         /* HCI version 1.0 and 1.1 support UniPro 1.41 */
841         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
842             (hba->ufs_version == UFSHCI_VERSION_11))
843                 return UFS_UNIPRO_VER_1_41;
844         else
845                 return UFS_UNIPRO_VER_1_6;
846 }
847 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
848
849 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
850 {
851         /*
852          * If both host and device support UniPro ver1.6 or later, PA layer
853          * parameters tuning happens during link startup itself.
854          *
855          * We can manually tune PA layer parameters if either host or device
856          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
857          * logic simple, we will only do manual tuning if local unipro version
858          * doesn't support ver1.6 or later.
859          */
860         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
861                 return true;
862         else
863                 return false;
864 }
865
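/*
 * ufshcd_scale_clks - set each controller clock to its max (scale up) or
 * min (scale down) frequency
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success, non-zero on a clk_set_rate() failure or from the
 * vendor clk_scale_notify hooks.
 */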
866 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
867 {
868         int ret = 0;
869         struct ufs_clk_info *clki;
870         struct list_head *head = &hba->clk_list_head;
871         ktime_t start = ktime_get();
872         bool clk_state_changed = false;
873
874         if (list_empty(head))
875                 goto out;
876
877         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
878         if (ret)
879                 return ret;
880
881         list_for_each_entry(clki, head, list) {
882                 if (!IS_ERR_OR_NULL(clki->clk)) {
883                         if (scale_up && clki->max_freq) {
884                                 if (clki->curr_freq == clki->max_freq)
885                                         continue;
886
887                                 clk_state_changed = true;
888                                 ret = clk_set_rate(clki->clk, clki->max_freq);
889                                 if (ret) {
890                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
891                                                 __func__, clki->name,
892                                                 clki->max_freq, ret);
893                                         break;
894                                 }
895                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
896                                                 "scaled up", clki->name,
897                                                 clki->curr_freq,
898                                                 clki->max_freq);
899
900                                 clki->curr_freq = clki->max_freq;
901
902                         } else if (!scale_up && clki->min_freq) {
903                                 if (clki->curr_freq == clki->min_freq)
904                                         continue;
905
906                                 clk_state_changed = true;
907                                 ret = clk_set_rate(clki->clk, clki->min_freq);
908                                 if (ret) {
909                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
910                                                 __func__, clki->name,
911                                                 clki->min_freq, ret);
912                                         break;
913                                 }
914                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
915                                                 "scaled down", clki->name,
916                                                 clki->curr_freq,
917                                                 clki->min_freq);
918                                 clki->curr_freq = clki->min_freq;
919                         }
920                 }
921                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
922                                 clki->name, clk_get_rate(clki->clk));
923         }
924
925         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
926
927 out:
928         if (clk_state_changed)
929                 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
930                         (scale_up ? "up" : "down"),
931                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
932         return ret;
933 }
934
935 /**
936  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
937  * @hba: per adapter instance
938  * @scale_up: True if scaling up and false if scaling down
939  *
940  * Returns true if scaling is required, false otherwise.
941  */
942 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
943                                                bool scale_up)
944 {
945         struct ufs_clk_info *clki;
946         struct list_head *head = &hba->clk_list_head;
947
948         if (list_empty(head))
949                 return false;
950
951         list_for_each_entry(clki, head, list) {
952                 if (!IS_ERR_OR_NULL(clki->clk)) {
953                         if (scale_up && clki->max_freq) {
954                                 if (clki->curr_freq == clki->max_freq)
955                                         continue;
956                                 return true;
957                         } else if (!scale_up && clki->min_freq) {
958                                 if (clki->curr_freq == clki->min_freq)
959                                         continue;
960                                 return true;
961                         }
962                 }
963         }
964
965         return false;
966 }
967
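/*
 * ufshcd_wait_for_doorbell_clr - wait for the task management and transfer
 * request doorbell registers to clear, i.e. for all outstanding requests
 * to complete
 * @hba: per adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Returns 0 once both doorbells are clear, -EBUSY on timeout or if the
 * host is not in the OPERATIONAL state.
 */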
968 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
969                                         u64 wait_timeout_us)
970 {
971         unsigned long flags;
972         int ret = 0;
973         u32 tm_doorbell;
974         u32 tr_doorbell;
975         bool timeout = false, do_last_check = false;
976         ktime_t start;
977
978         ufshcd_hold(hba, false);
979         spin_lock_irqsave(hba->host->host_lock, flags);
980         /*
981          * Wait for all the outstanding tasks/transfer requests.
982          * Verify by checking the doorbell registers are clear.
983          */
984         start = ktime_get();
985         do {
986                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
987                         ret = -EBUSY;
988                         goto out;
989                 }
990
991                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
992                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
993                 if (!tm_doorbell && !tr_doorbell) {
994                         timeout = false;
995                         break;
996                 } else if (do_last_check) {
997                         break;
998                 }
999
1000                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1001                 schedule();
1002                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1003                     wait_timeout_us) {
1004                         timeout = true;
1005                         /*
1006                          * We might have scheduled out for a long time so make
1007                          * sure to check if doorbells are cleared by this time
1008                          * or not.
1009                          */
1010                         do_last_check = true;
1011                 }
1012                 spin_lock_irqsave(hba->host->host_lock, flags);
1013         } while (tm_doorbell || tr_doorbell);
1014
1015         if (timeout) {
1016                 dev_err(hba->dev,
1017                         "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1018                         __func__, tm_doorbell, tr_doorbell);
1019                 ret = -EBUSY;
1020         }
1021 out:
1022         spin_unlock_irqrestore(hba->host->host_lock, flags);
1023         ufshcd_release(hba);
1024         return ret;
1025 }
1026
1027 /**
1028  * ufshcd_scale_gear - scale up/down UFS gear
1029  * @hba: per adapter instance
1030  * @scale_up: True for scaling up gear and false for scaling down
1031  *
1032  * Returns 0 for success,
1033  * Returns -EBUSY if scaling can't happen at this time
1034  * Returns non-zero for any other errors
1035  */
1036 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1037 {
1038         #define UFS_MIN_GEAR_TO_SCALE_DOWN      UFS_HS_G1
1039         int ret = 0;
1040         struct ufs_pa_layer_attr new_pwr_info;
1041
1042         if (scale_up) {
1043                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1044                        sizeof(struct ufs_pa_layer_attr));
1045         } else {
1046                 memcpy(&new_pwr_info, &hba->pwr_info,
1047                        sizeof(struct ufs_pa_layer_attr));
1048
1049                 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1050                     || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1051                         /* save the current power mode */
1052                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
1053                                 &hba->pwr_info,
1054                                 sizeof(struct ufs_pa_layer_attr));
1055
1056                         /* scale down gear */
1057                         new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1058                         new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1059                 }
1060         }
1061
1062         /* check if the power mode needs to be changed */
1063         ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1064
1065         if (ret)
1066                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1067                         __func__, ret,
1068                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1069                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1070
1071         return ret;
1072 }
1073
1074 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1075 {
1076         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
1077         int ret = 0;
1078         /*
1079          * make sure that there are no outstanding requests when
1080          * clock scaling is in progress
1081          */
1082         scsi_block_requests(hba->host);
1083         down_write(&hba->clk_scaling_lock);
1084         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1085                 ret = -EBUSY;
1086                 up_write(&hba->clk_scaling_lock);
1087                 scsi_unblock_requests(hba->host);
1088         }
1089
1090         return ret;
1091 }
1092
1093 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1094 {
1095         up_write(&hba->clk_scaling_lock);
1096         scsi_unblock_requests(hba->host);
1097 }
1098
1099 /**
1100  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1101  * @hba: per adapter instance
1102  * @scale_up: True for scaling up and false for scaling down
1103  *
1104  * Returns 0 for success,
1105  * Returns -EBUSY if scaling can't happen at this time
1106  * Returns non-zero for any other errors
1107  */
1108 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1109 {
1110         int ret = 0;
1111
1112         /* let's not get into low power until clock scaling is completed */
1113         ufshcd_hold(hba, false);
1114
1115         ret = ufshcd_clock_scaling_prepare(hba);
1116         if (ret)
1117                 return ret;
1118
1119         /* scale down the gear before scaling down clocks */
1120         if (!scale_up) {
1121                 ret = ufshcd_scale_gear(hba, false);
1122                 if (ret)
1123                         goto out;
1124         }
1125
1126         ret = ufshcd_scale_clks(hba, scale_up);
1127         if (ret) {
1128                 if (!scale_up)
1129                         ufshcd_scale_gear(hba, true);
1130                 goto out;
1131         }
1132
1133         /* scale up the gear after scaling up clocks */
1134         if (scale_up) {
1135                 ret = ufshcd_scale_gear(hba, true);
1136                 if (ret) {
1137                         ufshcd_scale_clks(hba, false);
1138                         goto out;
1139                 }
1140         }
1141
1142         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1143
1144 out:
1145         ufshcd_clock_scaling_unprepare(hba);
1146         ufshcd_release(hba);
1147         return ret;
1148 }
1149
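/*
 * Work items used by the clock scaling logic: suspend_work parks devfreq
 * when there are no outstanding requests, resume_work restarts it.
 */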
1150 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1151 {
1152         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1153                                            clk_scaling.suspend_work);
1154         unsigned long irq_flags;
1155
1156         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1157         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1158                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1159                 return;
1160         }
1161         hba->clk_scaling.is_suspended = true;
1162         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1163
1164         __ufshcd_suspend_clkscaling(hba);
1165 }
1166
1167 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1168 {
1169         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1170                                            clk_scaling.resume_work);
1171         unsigned long irq_flags;
1172
1173         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1174         if (!hba->clk_scaling.is_suspended) {
1175                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1176                 return;
1177         }
1178         hba->clk_scaling.is_suspended = false;
1179         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1180
1181         devfreq_resume_device(hba->devfreq);
1182 }
1183
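/*
 * ufshcd_devfreq_target - devfreq .target callback that scales the UFS
 * clocks and gear
 * @dev: controller device
 * @freq: requested frequency; UINT_MAX requests scaling up, 0 scaling down
 * @flags: devfreq flags (unused here)
 *
 * Returns 0 on success or when no scaling is required, a negative errno
 * otherwise.
 */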
1184 static int ufshcd_devfreq_target(struct device *dev,
1185                                 unsigned long *freq, u32 flags)
1186 {
1187         int ret = 0;
1188         struct ufs_hba *hba = dev_get_drvdata(dev);
1189         ktime_t start;
1190         bool scale_up, sched_clk_scaling_suspend_work = false;
1191         unsigned long irq_flags;
1192
1193         if (!ufshcd_is_clkscaling_supported(hba))
1194                 return -EINVAL;
1195
1196         if ((*freq > 0) && (*freq < UINT_MAX)) {
1197                 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
1198                 return -EINVAL;
1199         }
1200
1201         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1202         if (ufshcd_eh_in_progress(hba)) {
1203                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1204                 return 0;
1205         }
1206
1207         if (!hba->clk_scaling.active_reqs)
1208                 sched_clk_scaling_suspend_work = true;
1209
1210         scale_up = (*freq == UINT_MAX) ? true : false;
1211         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1212                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1213                 ret = 0;
1214                 goto out; /* no state change required */
1215         }
1216         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1217
1218         pm_runtime_get_noresume(hba->dev);
1219         if (!pm_runtime_active(hba->dev)) {
1220                 pm_runtime_put_noidle(hba->dev);
1221                 ret = -EAGAIN;
1222                 goto out;
1223         }
1224         start = ktime_get();
1225         ret = ufshcd_devfreq_scale(hba, scale_up);
1226         pm_runtime_put(hba->dev);
1227
1228         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1229                 (scale_up ? "up" : "down"),
1230                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1231
1232 out:
1233         if (sched_clk_scaling_suspend_work)
1234                 queue_work(hba->clk_scaling.workq,
1235                            &hba->clk_scaling.suspend_work);
1236
1237         return ret;
1238 }
1239
1240
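/*
 * ufshcd_devfreq_get_dev_status - devfreq .get_dev_status callback; reports
 * how long the controller was busy within the current polling window.
 */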
1241 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1242                 struct devfreq_dev_status *stat)
1243 {
1244         struct ufs_hba *hba = dev_get_drvdata(dev);
1245         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1246         unsigned long flags;
1247
1248         if (!ufshcd_is_clkscaling_supported(hba))
1249                 return -EINVAL;
1250
1251         memset(stat, 0, sizeof(*stat));
1252
1253         spin_lock_irqsave(hba->host->host_lock, flags);
1254         if (!scaling->window_start_t)
1255                 goto start_window;
1256
1257         if (scaling->is_busy_started)
1258                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1259                                         scaling->busy_start_t));
1260
1261         stat->total_time = jiffies_to_usecs((long)jiffies -
1262                                 (long)scaling->window_start_t);
1263         stat->busy_time = scaling->tot_busy_t;
1264 start_window:
1265         scaling->window_start_t = jiffies;
1266         scaling->tot_busy_t = 0;
1267
1268         if (hba->outstanding_reqs) {
1269                 scaling->busy_start_t = ktime_get();
1270                 scaling->is_busy_started = true;
1271         } else {
1272                 scaling->busy_start_t = 0;
1273                 scaling->is_busy_started = false;
1274         }
1275         spin_unlock_irqrestore(hba->host->host_lock, flags);
1276         return 0;
1277 }
1278
1279 static struct devfreq_dev_profile ufs_devfreq_profile = {
1280         .polling_ms     = 100,
1281         .target         = ufshcd_devfreq_target,
1282         .get_dev_status = ufshcd_devfreq_get_dev_status,
1283 };
1284
1285 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1286 {
1287         unsigned long flags;
1288
1289         devfreq_suspend_device(hba->devfreq);
1290         spin_lock_irqsave(hba->host->host_lock, flags);
1291         hba->clk_scaling.window_start_t = 0;
1292         spin_unlock_irqrestore(hba->host->host_lock, flags);
1293 }
1294
1295 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1296 {
1297         unsigned long flags;
1298         bool suspend = false;
1299
1300         if (!ufshcd_is_clkscaling_supported(hba))
1301                 return;
1302
1303         spin_lock_irqsave(hba->host->host_lock, flags);
1304         if (!hba->clk_scaling.is_suspended) {
1305                 suspend = true;
1306                 hba->clk_scaling.is_suspended = true;
1307         }
1308         spin_unlock_irqrestore(hba->host->host_lock, flags);
1309
1310         if (suspend)
1311                 __ufshcd_suspend_clkscaling(hba);
1312 }
1313
1314 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1315 {
1316         unsigned long flags;
1317         bool resume = false;
1318
1319         if (!ufshcd_is_clkscaling_supported(hba))
1320                 return;
1321
1322         spin_lock_irqsave(hba->host->host_lock, flags);
1323         if (hba->clk_scaling.is_suspended) {
1324                 resume = true;
1325                 hba->clk_scaling.is_suspended = false;
1326         }
1327         spin_unlock_irqrestore(hba->host->host_lock, flags);
1328
1329         if (resume)
1330                 devfreq_resume_device(hba->devfreq);
1331 }
1332
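/*
 * sysfs "clkscale_enable" attribute handlers: show and update
 * hba->clk_scaling.is_allowed, suspending or resuming clock scaling
 * accordingly.
 */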
1333 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1334                 struct device_attribute *attr, char *buf)
1335 {
1336         struct ufs_hba *hba = dev_get_drvdata(dev);
1337
1338         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1339 }
1340
1341 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1342                 struct device_attribute *attr, const char *buf, size_t count)
1343 {
1344         struct ufs_hba *hba = dev_get_drvdata(dev);
1345         u32 value;
1346         int err;
1347
1348         if (kstrtou32(buf, 0, &value))
1349                 return -EINVAL;
1350
1351         value = !!value;
1352         if (value == hba->clk_scaling.is_allowed)
1353                 goto out;
1354
1355         pm_runtime_get_sync(hba->dev);
1356         ufshcd_hold(hba, false);
1357
1358         cancel_work_sync(&hba->clk_scaling.suspend_work);
1359         cancel_work_sync(&hba->clk_scaling.resume_work);
1360
1361         hba->clk_scaling.is_allowed = value;
1362
1363         if (value) {
1364                 ufshcd_resume_clkscaling(hba);
1365         } else {
1366                 ufshcd_suspend_clkscaling(hba);
1367                 err = ufshcd_devfreq_scale(hba, true);
1368                 if (err)
1369                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1370                                         __func__, err);
1371         }
1372
1373         ufshcd_release(hba);
1374         pm_runtime_put_sync(hba->dev);
1375 out:
1376         return count;
1377 }
1378
1379 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1380 {
1381         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1382         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1383         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1384         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1385         hba->clk_scaling.enable_attr.attr.mode = 0644;
1386         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1387                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1388 }
1389
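/*
 * ufshcd_ungate_work - turn the gated clocks back on and, if the link was
 * put into hibern8 while gating, bring it back to the active state before
 * unblocking SCSI requests.
 */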
1390 static void ufshcd_ungate_work(struct work_struct *work)
1391 {
1392         int ret;
1393         unsigned long flags;
1394         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1395                         clk_gating.ungate_work);
1396
1397         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1398
1399         spin_lock_irqsave(hba->host->host_lock, flags);
1400         if (hba->clk_gating.state == CLKS_ON) {
1401                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1402                 goto unblock_reqs;
1403         }
1404
1405         spin_unlock_irqrestore(hba->host->host_lock, flags);
1406         ufshcd_setup_clocks(hba, true);
1407
1408         /* Exit from hibern8 */
1409         if (ufshcd_can_hibern8_during_gating(hba)) {
1410                 /* Prevent gating in this path */
1411                 hba->clk_gating.is_suspended = true;
1412                 if (ufshcd_is_link_hibern8(hba)) {
1413                         ret = ufshcd_uic_hibern8_exit(hba);
1414                         if (ret)
1415                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1416                                         __func__, ret);
1417                         else
1418                                 ufshcd_set_link_active(hba);
1419                 }
1420                 hba->clk_gating.is_suspended = false;
1421         }
1422 unblock_reqs:
1423         scsi_unblock_requests(hba->host);
1424 }
1425
1426 /**
1427  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1428  * Also, exit from hibern8 mode and set the link as active.
1429  * @hba: per adapter instance
1430  * @async: This indicates whether caller should ungate clocks asynchronously.
1431  */
1432 int ufshcd_hold(struct ufs_hba *hba, bool async)
1433 {
1434         int rc = 0;
1435         bool flush_result;
1436         unsigned long flags;
1437
1438         if (!ufshcd_is_clkgating_allowed(hba))
1439                 goto out;
1440         spin_lock_irqsave(hba->host->host_lock, flags);
1441         hba->clk_gating.active_reqs++;
1442
1443         if (ufshcd_eh_in_progress(hba)) {
1444                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1445                 return 0;
1446         }
1447
1448 start:
1449         switch (hba->clk_gating.state) {
1450         case CLKS_ON:
1451                 /*
1452                  * Wait for the ungate work to complete if in progress.
1453                  * Though the clocks may be in ON state, the link could
1454                  * still be in hibern8 state if hibern8 is allowed
1455                  * during clock gating.
1456                  * Make sure we also exit hibern8, in addition to the
1457                  * clocks being ON.
1458                  */
1459                 if (ufshcd_can_hibern8_during_gating(hba) &&
1460                     ufshcd_is_link_hibern8(hba)) {
1461                         if (async) {
1462                                 rc = -EAGAIN;
1463                                 hba->clk_gating.active_reqs--;
1464                                 break;
1465                         }
1466                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1467                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1468                         if (hba->clk_gating.is_suspended && !flush_result)
1469                                 goto out;
1470                         spin_lock_irqsave(hba->host->host_lock, flags);
1471                         goto start;
1472                 }
1473                 break;
1474         case REQ_CLKS_OFF:
1475                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1476                         hba->clk_gating.state = CLKS_ON;
1477                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1478                                                 hba->clk_gating.state);
1479                         break;
1480                 }
1481                 /*
1482                  * If we are here, it means gating work is either done or
1483                  * currently running. Hence, fall through to cancel gating
1484                  * work and to enable clocks.
1485                  */
1486         case CLKS_OFF:
1487                 scsi_block_requests(hba->host);
1488                 hba->clk_gating.state = REQ_CLKS_ON;
1489                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1490                                         hba->clk_gating.state);
1491                 schedule_work(&hba->clk_gating.ungate_work);
1492                 /*
1493                  * fall through to check if we should wait for this
1494                  * work to be done or not.
1495                  */
1496         case REQ_CLKS_ON:
1497                 if (async) {
1498                         rc = -EAGAIN;
1499                         hba->clk_gating.active_reqs--;
1500                         break;
1501                 }
1502
1503                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1504                 flush_work(&hba->clk_gating.ungate_work);
1505                 /* Make sure state is CLKS_ON before returning */
1506                 spin_lock_irqsave(hba->host->host_lock, flags);
1507                 goto start;
1508         default:
1509                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1510                                 __func__, hba->clk_gating.state);
1511                 break;
1512         }
1513         spin_unlock_irqrestore(hba->host->host_lock, flags);
1514 out:
1515         return rc;
1516 }
1517 EXPORT_SYMBOL_GPL(ufshcd_hold);
1518
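/**
 * ufshcd_gate_work - gate (turn off) the controller clocks when idle
 * @work: pointer to the delayed gate work within struct ufs_hba
 *
 * Scheduled by __ufshcd_release() after clk_gating.delay_ms of idleness.
 * Bails out if new activity arrived or an ungate was requested in the
 * meantime, optionally puts the link into hibern8, and then switches the
 * clocks off (keeping the device ref_clk on while the link is active).
 */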
1519 static void ufshcd_gate_work(struct work_struct *work)
1520 {
1521         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1522                         clk_gating.gate_work.work);
1523         unsigned long flags;
1524
1525         spin_lock_irqsave(hba->host->host_lock, flags);
1526         /*
1527          * In case you are here to cancel this work, the gating state
1528          * would have been marked REQ_CLKS_ON. In that case, save time by
1529          * skipping the gating work and exiting after changing the clock
1530          * state to CLKS_ON.
1531          */
1532         if (hba->clk_gating.is_suspended ||
1533                 (hba->clk_gating.state == REQ_CLKS_ON)) {
1534                 hba->clk_gating.state = CLKS_ON;
1535                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1536                                         hba->clk_gating.state);
1537                 goto rel_lock;
1538         }
1539
1540         if (hba->clk_gating.active_reqs
1541                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1542                 || hba->lrb_in_use || hba->outstanding_tasks
1543                 || hba->active_uic_cmd || hba->uic_async_done)
1544                 goto rel_lock;
1545
1546         spin_unlock_irqrestore(hba->host->host_lock, flags);
1547
1548         /* put the link into hibern8 mode before turning off clocks */
1549         if (ufshcd_can_hibern8_during_gating(hba)) {
1550                 if (ufshcd_uic_hibern8_enter(hba)) {
1551                         hba->clk_gating.state = CLKS_ON;
1552                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1553                                                 hba->clk_gating.state);
1554                         goto out;
1555                 }
1556                 ufshcd_set_link_hibern8(hba);
1557         }
1558
1559         if (!ufshcd_is_link_active(hba))
1560                 ufshcd_setup_clocks(hba, false);
1561         else
1562                 /* If link is active, device ref_clk can't be switched off */
1563                 __ufshcd_setup_clocks(hba, false, true);
1564
1565         /*
1566          * In case you are here to cancel this work, the gating state
1567          * would have been marked REQ_CLKS_ON. In that case keep the state
1568          * as REQ_CLKS_ON, which anyway implies that clocks are off and a
1569          * request to turn them on is pending. This keeps the state machine
1570          * intact and ultimately prevents the cancel work from running
1571          * multiple times when new requests arrive before the current
1572          * cancel work is done.
1573          */
1574         spin_lock_irqsave(hba->host->host_lock, flags);
1575         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1576                 hba->clk_gating.state = CLKS_OFF;
1577                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1578                                         hba->clk_gating.state);
1579         }
1580 rel_lock:
1581         spin_unlock_irqrestore(hba->host->host_lock, flags);
1582 out:
1583         return;
1584 }
1585
1586 /* host lock must be held before calling this variant */
1587 static void __ufshcd_release(struct ufs_hba *hba)
1588 {
1589         if (!ufshcd_is_clkgating_allowed(hba))
1590                 return;
1591
1592         hba->clk_gating.active_reqs--;
1593
1594         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1595                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1596                 || hba->lrb_in_use || hba->outstanding_tasks
1597                 || hba->active_uic_cmd || hba->uic_async_done
1598                 || ufshcd_eh_in_progress(hba))
1599                 return;
1600
1601         hba->clk_gating.state = REQ_CLKS_OFF;
1602         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1603         schedule_delayed_work(&hba->clk_gating.gate_work,
1604                         msecs_to_jiffies(hba->clk_gating.delay_ms));
1605 }
1606
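/**
 * ufshcd_release - drop a clock gating reference taken by ufshcd_hold()
 * @hba: per adapter instance
 *
 * Locked wrapper around __ufshcd_release(); once the last reference is
 * dropped and the controller is idle, the delayed gate work is scheduled.
 */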
1607 void ufshcd_release(struct ufs_hba *hba)
1608 {
1609         unsigned long flags;
1610
1611         spin_lock_irqsave(hba->host->host_lock, flags);
1612         __ufshcd_release(hba);
1613         spin_unlock_irqrestore(hba->host->host_lock, flags);
1614 }
1615 EXPORT_SYMBOL_GPL(ufshcd_release);
1616
1617 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1618                 struct device_attribute *attr, char *buf)
1619 {
1620         struct ufs_hba *hba = dev_get_drvdata(dev);
1621
1622         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1623 }
1624
1625 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1626                 struct device_attribute *attr, const char *buf, size_t count)
1627 {
1628         struct ufs_hba *hba = dev_get_drvdata(dev);
1629         unsigned long flags, value;
1630
1631         if (kstrtoul(buf, 0, &value))
1632                 return -EINVAL;
1633
1634         spin_lock_irqsave(hba->host->host_lock, flags);
1635         hba->clk_gating.delay_ms = value;
1636         spin_unlock_irqrestore(hba->host->host_lock, flags);
1637         return count;
1638 }
1639
1640 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1641                 struct device_attribute *attr, char *buf)
1642 {
1643         struct ufs_hba *hba = dev_get_drvdata(dev);
1644
1645         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1646 }
1647
1648 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1649                 struct device_attribute *attr, const char *buf, size_t count)
1650 {
1651         struct ufs_hba *hba = dev_get_drvdata(dev);
1652         unsigned long flags;
1653         u32 value;
1654
1655         if (kstrtou32(buf, 0, &value))
1656                 return -EINVAL;
1657
1658         value = !!value;
1659         if (value == hba->clk_gating.is_enabled)
1660                 goto out;
1661
1662         if (value) {
1663                 ufshcd_release(hba);
1664         } else {
1665                 spin_lock_irqsave(hba->host->host_lock, flags);
1666                 hba->clk_gating.active_reqs++;
1667                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1668         }
1669
1670         hba->clk_gating.is_enabled = value;
1671 out:
1672         return count;
1673 }
1674
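/**
 * ufshcd_init_clk_gating - set up the clock gating infrastructure
 * @hba: per adapter instance
 *
 * Initializes the gate/ungate work items, the default 150 ms gating delay
 * and the "clkgate_delay_ms" and "clkgate_enable" sysfs attributes. Does
 * nothing if clock gating is not allowed for this host.
 */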
1675 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1676 {
1677         if (!ufshcd_is_clkgating_allowed(hba))
1678                 return;
1679
1680         hba->clk_gating.delay_ms = 150;
1681         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1682         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1683
1684         hba->clk_gating.is_enabled = true;
1685
1686         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1687         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1688         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1689         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1690         hba->clk_gating.delay_attr.attr.mode = 0644;
1691         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1692                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1693
1694         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1695         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1696         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1697         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1698         hba->clk_gating.enable_attr.attr.mode = 0644;
1699         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1700                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1701 }
1702
1703 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1704 {
1705         if (!ufshcd_is_clkgating_allowed(hba))
1706                 return;
1707         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1708         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1709         cancel_work_sync(&hba->clk_gating.ungate_work);
1710         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1711 }
1712
1713 /* Must be called with host lock acquired */
1714 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1715 {
1716         bool queue_resume_work = false;
1717
1718         if (!ufshcd_is_clkscaling_supported(hba))
1719                 return;
1720
1721         if (!hba->clk_scaling.active_reqs++)
1722                 queue_resume_work = true;
1723
1724         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1725                 return;
1726
1727         if (queue_resume_work)
1728                 queue_work(hba->clk_scaling.workq,
1729                            &hba->clk_scaling.resume_work);
1730
1731         if (!hba->clk_scaling.window_start_t) {
1732                 hba->clk_scaling.window_start_t = jiffies;
1733                 hba->clk_scaling.tot_busy_t = 0;
1734                 hba->clk_scaling.is_busy_started = false;
1735         }
1736
1737         if (!hba->clk_scaling.is_busy_started) {
1738                 hba->clk_scaling.busy_start_t = ktime_get();
1739                 hba->clk_scaling.is_busy_started = true;
1740         }
1741 }
1742
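/**
 * ufshcd_clk_scaling_update_busy - close the current clock scaling busy period
 * @hba: per adapter instance
 *
 * Once there are no outstanding transfer requests, the elapsed busy time is
 * added to tot_busy_t, which the clock scaling logic uses when evaluating
 * the load over the current monitoring window.
 */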
1743 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1744 {
1745         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1746
1747         if (!ufshcd_is_clkscaling_supported(hba))
1748                 return;
1749
1750         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1751                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1752                                         scaling->busy_start_t));
1753                 scaling->busy_start_t = 0;
1754                 scaling->is_busy_started = false;
1755         }
1756 }
1757 /**
1758  * ufshcd_send_command - Send SCSI or device management commands
1759  * @hba: per adapter instance
1760  * @task_tag: Task tag of the command
1761  */
1762 static inline
1763 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1764 {
1765         hba->lrb[task_tag].issue_time_stamp = ktime_get();
1766         ufshcd_clk_scaling_start_busy(hba);
1767         __set_bit(task_tag, &hba->outstanding_reqs);
1768         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1769         /* Make sure that doorbell is committed immediately */
1770         wmb();
1771         ufshcd_add_command_trace(hba, task_tag, "send");
1772 }
1773
1774 /**
1775  * ufshcd_copy_sense_data - Copy sense data in case of check condition
1776  * @lrbp: pointer to local reference block
1777  */
1778 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1779 {
1780         int len;
1781         if (lrbp->sense_buffer &&
1782             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1783                 int len_to_copy;
1784
1785                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1786                 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1787
1788                 memcpy(lrbp->sense_buffer,
1789                         lrbp->ucd_rsp_ptr->sr.sense_data,
1790                         min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1791         }
1792 }
1793
1794 /**
1795  * ufshcd_copy_query_response() - Copy the Query Response and the data
1796  * descriptor
1797  * @hba: per adapter instance
1798  * @lrbp: pointer to local reference block
1799  */
1800 static
1801 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1802 {
1803         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1804
1805         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1806
1807         /* Get the descriptor */
1808         if (hba->dev_cmd.query.descriptor &&
1809             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1810                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1811                                 GENERAL_UPIU_REQUEST_SIZE;
1812                 u16 resp_len;
1813                 u16 buf_len;
1814
1815                 /* data segment length */
1816                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1817                                                 MASK_QUERY_DATA_SEG_LEN;
1818                 buf_len = be16_to_cpu(
1819                                 hba->dev_cmd.query.request.upiu_req.length);
1820                 if (likely(buf_len >= resp_len)) {
1821                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1822                 } else {
1823                         dev_warn(hba->dev,
1824                                 "%s: Response size is bigger than buffer",
1825                                 __func__);
1826                         return -EINVAL;
1827                 }
1828         }
1829
1830         return 0;
1831 }
1832
1833 /**
1834  * ufshcd_hba_capabilities - Read controller capabilities
1835  * @hba: per adapter instance
1836  */
1837 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1838 {
1839         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1840
1841         /* nutrs and nutmrs are 0 based values */
1842         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1843         hba->nutmrs =
1844         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1845 }
1846
1847 /**
1848  * ufshcd_ready_for_uic_cmd - Check if controller is ready
1849  *                            to accept UIC commands
1850  * @hba: per adapter instance
1851  * Returns true if the controller is ready to accept UIC commands, else false
1852  */
1853 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1854 {
1855         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1856                 return true;
1857         else
1858                 return false;
1859 }
1860
1861 /**
1862  * ufshcd_get_upmcrs - Get the power mode change request status
1863  * @hba: Pointer to adapter instance
1864  *
1865  * This function gets the UPMCRS field of HCS register
1866  * Returns value of UPMCRS field
1867  */
1868 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1869 {
1870         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1871 }
1872
1873 /**
1874  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1875  * @hba: per adapter instance
1876  * @uic_cmd: UIC command
1877  *
1878  * Mutex must be held.
1879  */
1880 static inline void
1881 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1882 {
1883         WARN_ON(hba->active_uic_cmd);
1884
1885         hba->active_uic_cmd = uic_cmd;
1886
1887         /* Write Args */
1888         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1889         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1890         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
1891
1892         /* Write UIC Cmd */
1893         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
1894                       REG_UIC_COMMAND);
1895 }
1896
1897 /**
1898  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
1899  * @hba: per adapter instance
1900  * @uic_cmd: UIC command
1901  *
1902  * Must be called with mutex held.
1903  * Returns 0 only if success.
1904  */
1905 static int
1906 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1907 {
1908         int ret;
1909         unsigned long flags;
1910
1911         if (wait_for_completion_timeout(&uic_cmd->done,
1912                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
1913                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
1914         else
1915                 ret = -ETIMEDOUT;
1916
1917         spin_lock_irqsave(hba->host->host_lock, flags);
1918         hba->active_uic_cmd = NULL;
1919         spin_unlock_irqrestore(hba->host->host_lock, flags);
1920
1921         return ret;
1922 }
1923
1924 /**
1925  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1926  * @hba: per adapter instance
1927  * @uic_cmd: UIC command
1928  * @completion: initialize the completion only if this is set to true
1929  *
1930  * Identical to ufshcd_send_uic_cmd() except for the locking: must be
1931  * called with uic_cmd_mutex held and host_lock locked.
1932  * Returns 0 only if success.
1933  */
1934 static int
1935 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
1936                       bool completion)
1937 {
1938         if (!ufshcd_ready_for_uic_cmd(hba)) {
1939                 dev_err(hba->dev,
1940                         "Controller not ready to accept UIC commands\n");
1941                 return -EIO;
1942         }
1943
1944         if (completion)
1945                 init_completion(&uic_cmd->done);
1946
1947         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
1948
1949         return 0;
1950 }
1951
1952 /**
1953  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1954  * @hba: per adapter instance
1955  * @uic_cmd: UIC command
1956  *
1957  * Returns 0 only if success.
1958  */
1959 static int
1960 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1961 {
1962         int ret;
1963         unsigned long flags;
1964
1965         ufshcd_hold(hba, false);
1966         mutex_lock(&hba->uic_cmd_mutex);
1967         ufshcd_add_delay_before_dme_cmd(hba);
1968
1969         spin_lock_irqsave(hba->host->host_lock, flags);
1970         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
1971         spin_unlock_irqrestore(hba->host->host_lock, flags);
1972         if (!ret)
1973                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
1974
1975         mutex_unlock(&hba->uic_cmd_mutex);
1976
1977         ufshcd_release(hba);
1978         return ret;
1979 }
1980
1981 /**
1982  * ufshcd_map_sg - Map scatter-gather list to prdt
1983  * @lrbp - pointer to local reference block
1984  *
1985  * Returns 0 in case of success, non-zero value in case of failure
1986  */
1987 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1988 {
1989         struct ufshcd_sg_entry *prd_table;
1990         struct scatterlist *sg;
1991         struct scsi_cmnd *cmd;
1992         int sg_segments;
1993         int i;
1994
1995         cmd = lrbp->cmd;
1996         sg_segments = scsi_dma_map(cmd);
1997         if (sg_segments < 0)
1998                 return sg_segments;
1999
2000         if (sg_segments) {
2001                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2002                         lrbp->utr_descriptor_ptr->prd_table_length =
2003                                 cpu_to_le16((u16)(sg_segments *
2004                                         sizeof(struct ufshcd_sg_entry)));
2005                 else
2006                         lrbp->utr_descriptor_ptr->prd_table_length =
2007                                 cpu_to_le16((u16) (sg_segments));
2008
2009                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2010
2011                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2012                         prd_table[i].size  =
2013                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2014                         prd_table[i].base_addr =
2015                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2016                         prd_table[i].upper_addr =
2017                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2018                         prd_table[i].reserved = 0;
2019                 }
2020         } else {
2021                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2022         }
2023
2024         return 0;
2025 }
2026
2027 /**
2028  * ufshcd_enable_intr - enable interrupts
2029  * @hba: per adapter instance
2030  * @intrs: interrupt bits
2031  */
2032 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2033 {
2034         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2035
2036         if (hba->ufs_version == UFSHCI_VERSION_10) {
2037                 u32 rw;
2038                 rw = set & INTERRUPT_MASK_RW_VER_10;
2039                 set = rw | ((set ^ intrs) & intrs);
2040         } else {
2041                 set |= intrs;
2042         }
2043
2044         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2045 }
2046
2047 /**
2048  * ufshcd_disable_intr - disable interrupts
2049  * @hba: per adapter instance
2050  * @intrs: interrupt bits
2051  */
2052 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2053 {
2054         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2055
2056         if (hba->ufs_version == UFSHCI_VERSION_10) {
2057                 u32 rw;
2058                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2059                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2060                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2061
2062         } else {
2063                 set &= ~intrs;
2064         }
2065
2066         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2067 }
2068
2069 /**
2070  * ufshcd_prepare_req_desc_hdr() - Fill the request's header
2071  * descriptor according to the request
2072  * @lrbp: pointer to local reference block
2073  * @upiu_flags: flags required in the header
2074  * @cmd_dir: the request's data direction
2075  */
2076 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2077                         u32 *upiu_flags, enum dma_data_direction cmd_dir)
2078 {
2079         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2080         u32 data_direction;
2081         u32 dword_0;
2082
2083         if (cmd_dir == DMA_FROM_DEVICE) {
2084                 data_direction = UTP_DEVICE_TO_HOST;
2085                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2086         } else if (cmd_dir == DMA_TO_DEVICE) {
2087                 data_direction = UTP_HOST_TO_DEVICE;
2088                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2089         } else {
2090                 data_direction = UTP_NO_DATA_TRANSFER;
2091                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2092         }
2093
2094         dword_0 = data_direction | (lrbp->command_type
2095                                 << UPIU_COMMAND_TYPE_OFFSET);
2096         if (lrbp->intr_cmd)
2097                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2098
2099         /* Transfer request descriptor header fields */
2100         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2101         /* dword_1 is reserved, hence it is set to 0 */
2102         req_desc->header.dword_1 = 0;
2103         /*
2104          * Assign an invalid value to the command status. The controller
2105          * updates the OCS with the actual command status on command
2106          * completion.
2107          */
2108         req_desc->header.dword_2 =
2109                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2110         /* dword_3 is reserved, hence it is set to 0 */
2111         req_desc->header.dword_3 = 0;
2112
2113         req_desc->prd_table_length = 0;
2114 }
2115
2116 /**
2117  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2118  * for scsi commands
2119  * @lrbp - local reference block pointer
2120  * @upiu_flags - flags
2121  */
2122 static
2123 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2124 {
2125         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2126         unsigned short cdb_len;
2127
2128         /* command descriptor fields */
2129         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2130                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2131                                 lrbp->lun, lrbp->task_tag);
2132         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2133                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2134
2135         /* Total EHS length and Data segment length will be zero */
2136         ucd_req_ptr->header.dword_2 = 0;
2137
2138         ucd_req_ptr->sc.exp_data_transfer_len =
2139                 cpu_to_be32(lrbp->cmd->sdb.length);
2140
2141         cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2142         memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2143         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2144
2145         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2146 }
2147
2148 /**
2149  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2150  * for query requests
2151  * @hba: UFS hba
2152  * @lrbp: local reference block pointer
2153  * @upiu_flags: flags
2154  */
2155 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2156                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2157 {
2158         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2159         struct ufs_query *query = &hba->dev_cmd.query;
2160         u16 len = be16_to_cpu(query->request.upiu_req.length);
2161         u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2162
2163         /* Query request header */
2164         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2165                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2166                         lrbp->lun, lrbp->task_tag);
2167         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2168                         0, query->request.query_func, 0, 0);
2169
2170         /* Data segment length is only needed for WRITE_DESC */
2171         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2172                 ucd_req_ptr->header.dword_2 =
2173                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2174         else
2175                 ucd_req_ptr->header.dword_2 = 0;
2176
2177         /* Copy the Query Request buffer as is */
2178         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2179                         QUERY_OSF_SIZE);
2180
2181         /* Copy the Descriptor */
2182         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2183                 memcpy(descp, query->descriptor, len);
2184
2185         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2186 }
2187
2188 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2189 {
2190         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2191
2192         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2193
2194         /* command descriptor fields */
2195         ucd_req_ptr->header.dword_0 =
2196                 UPIU_HEADER_DWORD(
2197                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2198         /* clear rest of the fields of basic header */
2199         ucd_req_ptr->header.dword_1 = 0;
2200         ucd_req_ptr->header.dword_2 = 0;
2201
2202         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2203 }
2204
2205 /**
2206  * ufshcd_comp_devman_upiu - fill the UFS Protocol Information Unit (UPIU)
2207  *                           for device management purposes
2208  * @hba: per adapter instance
2209  * @lrbp: pointer to local reference block
2210  */
2211 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2212 {
2213         u32 upiu_flags;
2214         int ret = 0;
2215
2216         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2217             (hba->ufs_version == UFSHCI_VERSION_11))
2218                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2219         else
2220                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2221
2222         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2223         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2224                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2225         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2226                 ufshcd_prepare_utp_nop_upiu(lrbp);
2227         else
2228                 ret = -EINVAL;
2229
2230         return ret;
2231 }
2232
2233 /**
2234  * ufshcd_comp_scsi_upiu - fill the UFS Protocol Information Unit (UPIU)
2235  *                         for SCSI purposes
2236  * @hba: per adapter instance
2237  * @lrbp: pointer to local reference block
2238  */
2239 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2240 {
2241         u32 upiu_flags;
2242         int ret = 0;
2243
2244         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2245             (hba->ufs_version == UFSHCI_VERSION_11))
2246                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2247         else
2248                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2249
2250         if (likely(lrbp->cmd)) {
2251                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2252                                                 lrbp->cmd->sc_data_direction);
2253                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2254         } else {
2255                 ret = -EINVAL;
2256         }
2257
2258         return ret;
2259 }
2260
2261 /**
2262  * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2263  * @scsi_lun: scsi LUN id
2264  *
2265  * Returns UPIU LUN id
2266  */
2267 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2268 {
2269         if (scsi_is_wlun(scsi_lun))
2270                 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2271                         | UFS_UPIU_WLUN_ID;
2272         else
2273                 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2274 }
2275
2276 /**
2277  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2278  * @upiu_wlun_id: UPIU W-LUN id
2279  *
2280  * Returns SCSI W-LUN id
2281  */
2282 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2283 {
2284         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2285 }
2286
2287 /**
2288  * ufshcd_queuecommand - main entry point for SCSI requests
2289  * @host: SCSI host pointer
2290  * @cmd: command from SCSI Midlayer
2291  *
2292  * Returns 0 for success, non-zero in case of failure
2293  */
2294 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2295 {
2296         struct ufshcd_lrb *lrbp;
2297         struct ufs_hba *hba;
2298         unsigned long flags;
2299         int tag;
2300         int err = 0;
2301
2302         hba = shost_priv(host);
2303
2304         tag = cmd->request->tag;
2305         if (!ufshcd_valid_tag(hba, tag)) {
2306                 dev_err(hba->dev,
2307                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2308                         __func__, tag, cmd, cmd->request);
2309                 BUG();
2310         }
2311
2312         if (!down_read_trylock(&hba->clk_scaling_lock))
2313                 return SCSI_MLQUEUE_HOST_BUSY;
2314
2315         spin_lock_irqsave(hba->host->host_lock, flags);
2316         switch (hba->ufshcd_state) {
2317         case UFSHCD_STATE_OPERATIONAL:
2318                 break;
2319         case UFSHCD_STATE_EH_SCHEDULED:
2320         case UFSHCD_STATE_RESET:
2321                 err = SCSI_MLQUEUE_HOST_BUSY;
2322                 goto out_unlock;
2323         case UFSHCD_STATE_ERROR:
2324                 set_host_byte(cmd, DID_ERROR);
2325                 cmd->scsi_done(cmd);
2326                 goto out_unlock;
2327         default:
2328                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2329                                 __func__, hba->ufshcd_state);
2330                 set_host_byte(cmd, DID_BAD_TARGET);
2331                 cmd->scsi_done(cmd);
2332                 goto out_unlock;
2333         }
2334
2335         /* if error handling is in progress, don't issue commands */
2336         if (ufshcd_eh_in_progress(hba)) {
2337                 set_host_byte(cmd, DID_ERROR);
2338                 cmd->scsi_done(cmd);
2339                 goto out_unlock;
2340         }
2341         spin_unlock_irqrestore(hba->host->host_lock, flags);
2342
2343         hba->req_abort_count = 0;
2344
2345         /* acquire the tag to make sure device cmds don't use it */
2346         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2347                 /*
2348                  * Device management command in progress, requeue the command.
2349                  * Requeuing the command helps in cases where the request *may*
2350                  * find a different tag instead of waiting for the device
2351                  * management command to complete.
2352                  */
2353                 err = SCSI_MLQUEUE_HOST_BUSY;
2354                 goto out;
2355         }
2356
2357         err = ufshcd_hold(hba, true);
2358         if (err) {
2359                 err = SCSI_MLQUEUE_HOST_BUSY;
2360                 clear_bit_unlock(tag, &hba->lrb_in_use);
2361                 goto out;
2362         }
2363         WARN_ON(hba->clk_gating.state != CLKS_ON);
2364
2365         lrbp = &hba->lrb[tag];
2366
2367         WARN_ON(lrbp->cmd);
2368         lrbp->cmd = cmd;
2369         lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2370         lrbp->sense_buffer = cmd->sense_buffer;
2371         lrbp->task_tag = tag;
2372         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2373         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2374         lrbp->req_abort_skip = false;
2375
2376         ufshcd_comp_scsi_upiu(hba, lrbp);
2377
2378         err = ufshcd_map_sg(hba, lrbp);
2379         if (err) {
2380                 ufshcd_release(hba);
2381                 lrbp->cmd = NULL;
2382                 clear_bit_unlock(tag, &hba->lrb_in_use);
2383                 goto out;
2384         }
2385         /* Make sure descriptors are ready before ringing the doorbell */
2386         wmb();
2387
2388         /* issue command to the controller */
2389         spin_lock_irqsave(hba->host->host_lock, flags);
2390         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2391         ufshcd_send_command(hba, tag);
2392 out_unlock:
2393         spin_unlock_irqrestore(hba->host->host_lock, flags);
2394 out:
2395         up_read(&hba->clk_scaling_lock);
2396         return err;
2397 }
2398
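/**
 * ufshcd_compose_dev_cmd - fill a local reference block for a device
 *                          management command (NOP or Query)
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @cmd_type: device management command type
 * @tag: tag (slot) to be used for the command
 *
 * Device management commands carry no SCSI payload or sense data and are
 * not tied to any LUN; interrupt aggregation is bypassed for them.
 */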
2399 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2400                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2401 {
2402         lrbp->cmd = NULL;
2403         lrbp->sense_bufflen = 0;
2404         lrbp->sense_buffer = NULL;
2405         lrbp->task_tag = tag;
2406         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2407         lrbp->intr_cmd = true; /* No interrupt aggregation */
2408         hba->dev_cmd.type = cmd_type;
2409
2410         return ufshcd_comp_devman_upiu(hba, lrbp);
2411 }
2412
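/**
 * ufshcd_clear_cmd - clear a pending transfer request from the controller
 * @hba: per adapter instance
 * @tag: tag of the request to be cleared
 *
 * Clears the slot in the UTP transfer request list and then waits up to
 * one second for the corresponding doorbell bit to be cleared by the
 * host controller hardware.
 */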
2413 static int
2414 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2415 {
2416         int err = 0;
2417         unsigned long flags;
2418         u32 mask = 1 << tag;
2419
2420         /* clear outstanding transaction before retry */
2421         spin_lock_irqsave(hba->host->host_lock, flags);
2422         ufshcd_utrl_clear(hba, tag);
2423         spin_unlock_irqrestore(hba->host->host_lock, flags);
2424
2425         /*
2426          * wait for h/w to clear the corresponding bit in the doorbell.
2427          * max. wait is 1 sec.
2428          */
2429         err = ufshcd_wait_for_register(hba,
2430                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2431                         mask, ~mask, 1000, 1000, true);
2432
2433         return err;
2434 }
2435
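/**
 * ufshcd_check_query_response - extract the query response result code
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Saves the response field of the Query Response UPIU in the device command
 * result and returns it; a non-zero value indicates a query failure.
 */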
2436 static int
2437 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2438 {
2439         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2440
2441         /* Get the UPIU response */
2442         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2443                                 UPIU_RSP_CODE_OFFSET;
2444         return query_res->response;
2445 }
2446
2447 /**
2448  * ufshcd_dev_cmd_completion() - handles device management command responses
2449  * @hba: per adapter instance
2450  * @lrbp: pointer to local reference block
2451  */
2452 static int
2453 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2454 {
2455         int resp;
2456         int err = 0;
2457
2458         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2459         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2460
2461         switch (resp) {
2462         case UPIU_TRANSACTION_NOP_IN:
2463                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2464                         err = -EINVAL;
2465                         dev_err(hba->dev, "%s: unexpected response %x\n",
2466                                         __func__, resp);
2467                 }
2468                 break;
2469         case UPIU_TRANSACTION_QUERY_RSP:
2470                 err = ufshcd_check_query_response(hba, lrbp);
2471                 if (!err)
2472                         err = ufshcd_copy_query_response(hba, lrbp);
2473                 break;
2474         case UPIU_TRANSACTION_REJECT_UPIU:
2475                 /* TODO: handle Reject UPIU Response */
2476                 err = -EPERM;
2477                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2478                                 __func__);
2479                 break;
2480         default:
2481                 err = -EINVAL;
2482                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2483                                 __func__, resp);
2484                 break;
2485         }
2486
2487         return err;
2488 }
2489
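/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command to complete
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of the issued command
 * @max_timeout: maximum time to wait, in milliseconds
 *
 * Waits for the command completion and returns its result. On a timeout the
 * command is cleared from the doorbell and the outstanding request mask, and
 * -ETIMEDOUT (or -EAGAIN if the clear succeeded and a retry makes sense) is
 * returned.
 */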
2490 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2491                 struct ufshcd_lrb *lrbp, int max_timeout)
2492 {
2493         int err = 0;
2494         unsigned long time_left;
2495         unsigned long flags;
2496
2497         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2498                         msecs_to_jiffies(max_timeout));
2499
2500         /* Make sure descriptors are ready before ringing the doorbell */
2501         wmb();
2502         spin_lock_irqsave(hba->host->host_lock, flags);
2503         hba->dev_cmd.complete = NULL;
2504         if (likely(time_left)) {
2505                 err = ufshcd_get_tr_ocs(lrbp);
2506                 if (!err)
2507                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2508         }
2509         spin_unlock_irqrestore(hba->host->host_lock, flags);
2510
2511         if (!time_left) {
2512                 err = -ETIMEDOUT;
2513                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2514                         __func__, lrbp->task_tag);
2515                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2516                         /* successfully cleared the command, retry if needed */
2517                         err = -EAGAIN;
2518                 /*
2519                  * in case of an error, after clearing the doorbell,
2520                  * we also need to clear the outstanding_reqs
2521                  * field in hba
2522                  */
2523                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2524         }
2525
2526         return err;
2527 }
2528
2529 /**
2530  * ufshcd_get_dev_cmd_tag - Get device management command tag
2531  * @hba: per-adapter instance
2532  * @tag_out: pointer to variable with available slot value
2533  *
2534  * Get a free slot and lock it until device management command
2535  * completes.
2536  *
2537  * Returns false if a free slot is unavailable for locking, else
2538  * returns true with the tag value in @tag_out.
2539  */
2540 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2541 {
2542         int tag;
2543         bool ret = false;
2544         unsigned long tmp;
2545
2546         if (!tag_out)
2547                 goto out;
2548
2549         do {
2550                 tmp = ~hba->lrb_in_use;
2551                 tag = find_last_bit(&tmp, hba->nutrs);
2552                 if (tag >= hba->nutrs)
2553                         goto out;
2554         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2555
2556         *tag_out = tag;
2557         ret = true;
2558 out:
2559         return ret;
2560 }
2561
2562 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2563 {
2564         clear_bit_unlock(tag, &hba->lrb_in_use);
2565 }
2566
2567 /**
2568  * ufshcd_exec_dev_cmd - API for sending device management requests
2569  * @hba - UFS hba
2570  * @cmd_type - specifies the type (NOP, Query...)
2571  * @timeout - timeout in milliseconds
2572  *
2573  * NOTE: Since there is only one available tag for device management commands,
2574  * it is expected you hold the hba->dev_cmd.lock mutex.
2575  */
2576 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2577                 enum dev_cmd_type cmd_type, int timeout)
2578 {
2579         struct ufshcd_lrb *lrbp;
2580         int err;
2581         int tag;
2582         struct completion wait;
2583         unsigned long flags;
2584
2585         down_read(&hba->clk_scaling_lock);
2586
2587         /*
2588          * Get free slot, sleep if slots are unavailable.
2589          * Even though we use wait_event() which sleeps indefinitely,
2590          * the maximum wait time is bounded by SCSI request timeout.
2591          */
2592         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2593
2594         init_completion(&wait);
2595         lrbp = &hba->lrb[tag];
2596         WARN_ON(lrbp->cmd);
2597         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2598         if (unlikely(err))
2599                 goto out_put_tag;
2600
2601         hba->dev_cmd.complete = &wait;
2602
2603         /* Make sure descriptors are ready before ringing the doorbell */
2604         wmb();
2605         spin_lock_irqsave(hba->host->host_lock, flags);
2606         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2607         ufshcd_send_command(hba, tag);
2608         spin_unlock_irqrestore(hba->host->host_lock, flags);
2609
2610         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2611
2612 out_put_tag:
2613         ufshcd_put_dev_cmd_tag(hba, tag);
2614         wake_up(&hba->dev_cmd.tag_wq);
2615         up_read(&hba->clk_scaling_lock);
2616         return err;
2617 }
2618
2619 /**
2620  * ufshcd_init_query() - init the query response and request parameters
2621  * @hba: per-adapter instance
2622  * @request: address of the request pointer to be initialized
2623  * @response: address of the response pointer to be initialized
2624  * @opcode: operation to perform
2625  * @idn: flag idn to access
2626  * @index: LU number to access
2627  * @selector: query/flag/descriptor further identification
2628  */
2629 static inline void ufshcd_init_query(struct ufs_hba *hba,
2630                 struct ufs_query_req **request, struct ufs_query_res **response,
2631                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2632 {
2633         *request = &hba->dev_cmd.query.request;
2634         *response = &hba->dev_cmd.query.response;
2635         memset(*request, 0, sizeof(struct ufs_query_req));
2636         memset(*response, 0, sizeof(struct ufs_query_res));
2637         (*request)->upiu_req.opcode = opcode;
2638         (*request)->upiu_req.idn = idn;
2639         (*request)->upiu_req.index = index;
2640         (*request)->upiu_req.selector = selector;
2641 }
2642
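/**
 * ufshcd_query_flag_retry - query a flag, retrying on failure
 * @hba: per-adapter instance
 * @opcode: flag query opcode to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Wrapper around ufshcd_query_flag() that retries the request up to
 * QUERY_REQ_RETRIES times before giving up.
 *
 * Returns 0 for success, non-zero in case of failure.
 */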
2643 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2644         enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2645 {
2646         int ret;
2647         int retries;
2648
2649         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2650                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2651                 if (ret)
2652                         dev_dbg(hba->dev,
2653                                 "%s: failed with error %d, retries %d\n",
2654                                 __func__, ret, retries);
2655                 else
2656                         break;
2657         }
2658
2659         if (ret)
2660                 dev_err(hba->dev,
2661                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2662                         __func__, opcode, idn, ret, retries);
2663         return ret;
2664 }
2665
2666 /**
2667  * ufshcd_query_flag() - API function for sending flag query requests
2668  * @hba: per-adapter instance
2669  * @opcode: flag query to perform
2670  * @idn: flag idn to access
2671  * @flag_res: the flag value after the query request completes
2672  *
2673  * Returns 0 for success, non-zero in case of failure
2674  */
2675 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2676                         enum flag_idn idn, bool *flag_res)
2677 {
2678         struct ufs_query_req *request = NULL;
2679         struct ufs_query_res *response = NULL;
2680         int err, index = 0, selector = 0;
2681         int timeout = QUERY_REQ_TIMEOUT;
2682
2683         BUG_ON(!hba);
2684
2685         ufshcd_hold(hba, false);
2686         mutex_lock(&hba->dev_cmd.lock);
2687         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2688                         selector);
2689
2690         switch (opcode) {
2691         case UPIU_QUERY_OPCODE_SET_FLAG:
2692         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2693         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2694                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2695                 break;
2696         case UPIU_QUERY_OPCODE_READ_FLAG:
2697                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2698                 if (!flag_res) {
2699                         /* No dummy reads */
2700                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2701                                         __func__);
2702                         err = -EINVAL;
2703                         goto out_unlock;
2704                 }
2705                 break;
2706         default:
2707                 dev_err(hba->dev,
2708                         "%s: Expected query flag opcode but got = %d\n",
2709                         __func__, opcode);
2710                 err = -EINVAL;
2711                 goto out_unlock;
2712         }
2713
2714         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2715
2716         if (err) {
2717                 dev_err(hba->dev,
2718                         "%s: Sending flag query for idn %d failed, err = %d\n",
2719                         __func__, idn, err);
2720                 goto out_unlock;
2721         }
2722
2723         if (flag_res)
2724                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2725                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2726
2727 out_unlock:
2728         mutex_unlock(&hba->dev_cmd.lock);
2729         ufshcd_release(hba);
2730         return err;
2731 }
2732
2733 /**
2734  * ufshcd_query_attr - API function for sending attribute requests
2735  * @hba: per-adapter instance
2736  * @opcode: attribute opcode
2737  * @idn: attribute idn to access
2738  * @index: index field
2739  * @selector: selector field
2740  * @attr_val: the attribute value after the query request completes
2741  *
2742  * Returns 0 for success, non-zero in case of failure
2743 */
2744 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2745                         enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2746 {
2747         struct ufs_query_req *request = NULL;
2748         struct ufs_query_res *response = NULL;
2749         int err;
2750
2751         BUG_ON(!hba);
2752
2753         ufshcd_hold(hba, false);
2754         if (!attr_val) {
2755                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2756                                 __func__, opcode);
2757                 err = -EINVAL;
2758                 goto out;
2759         }
2760
2761         mutex_lock(&hba->dev_cmd.lock);
2762         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2763                         selector);
2764
2765         switch (opcode) {
2766         case UPIU_QUERY_OPCODE_WRITE_ATTR:
2767                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2768                 request->upiu_req.value = cpu_to_be32(*attr_val);
2769                 break;
2770         case UPIU_QUERY_OPCODE_READ_ATTR:
2771                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2772                 break;
2773         default:
2774                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2775                                 __func__, opcode);
2776                 err = -EINVAL;
2777                 goto out_unlock;
2778         }
2779
2780         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2781
2782         if (err) {
2783                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2784                                 __func__, opcode, idn, index, err);
2785                 goto out_unlock;
2786         }
2787
2788         *attr_val = be32_to_cpu(response->upiu_res.value);
2789
2790 out_unlock:
2791         mutex_unlock(&hba->dev_cmd.lock);
2792 out:
2793         ufshcd_release(hba);
2794         return err;
2795 }
2796
2797 /**
2798  * ufshcd_query_attr_retry() - API function for sending query
2799  * attribute with retries
2800  * @hba: per-adapter instance
2801  * @opcode: attribute opcode
2802  * @idn: attribute idn to access
2803  * @index: index field
2804  * @selector: selector field
2805  * @attr_val: the attribute value after the query request
2806  * completes
2807  *
2808  * Returns 0 for success, non-zero in case of failure
2809 */
2810 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2811         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2812         u32 *attr_val)
2813 {
2814         int ret = 0;
2815         u32 retries;
2816
2817         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2818                 ret = ufshcd_query_attr(hba, opcode, idn, index,
2819                                                 selector, attr_val);
2820                 if (ret)
2821                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2822                                 __func__, ret, retries);
2823                 else
2824                         break;
2825         }
2826
2827         if (ret)
2828                 dev_err(hba->dev,
2829                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2830                         __func__, idn, ret, QUERY_REQ_RETRIES);
2831         return ret;
2832 }
2833
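/**
 * __ufshcd_query_descriptor - send a descriptor read/write query request
 * @hba: per-adapter instance
 * @opcode: UPIU_QUERY_OPCODE_READ_DESC or UPIU_QUERY_OPCODE_WRITE_DESC
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: buffer holding (for write) or receiving (for read) the descriptor
 * @buf_len: on entry the size of @desc_buf, on return the length reported
 *           by the device
 *
 * Returns 0 for success, non-zero in case of failure.
 */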
2834 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2835                         enum query_opcode opcode, enum desc_idn idn, u8 index,
2836                         u8 selector, u8 *desc_buf, int *buf_len)
2837 {
2838         struct ufs_query_req *request = NULL;
2839         struct ufs_query_res *response = NULL;
2840         int err;
2841
2842         BUG_ON(!hba);
2843
2844         ufshcd_hold(hba, false);
2845         if (!desc_buf) {
2846                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2847                                 __func__, opcode);
2848                 err = -EINVAL;
2849                 goto out;
2850         }
2851
2852         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2853                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2854                                 __func__, *buf_len);
2855                 err = -EINVAL;
2856                 goto out;
2857         }
2858
2859         mutex_lock(&hba->dev_cmd.lock);
2860         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2861                         selector);
2862         hba->dev_cmd.query.descriptor = desc_buf;
2863         request->upiu_req.length = cpu_to_be16(*buf_len);
2864
2865         switch (opcode) {
2866         case UPIU_QUERY_OPCODE_WRITE_DESC:
2867                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2868                 break;
2869         case UPIU_QUERY_OPCODE_READ_DESC:
2870                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2871                 break;
2872         default:
2873                 dev_err(hba->dev,
2874                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2875                                 __func__, opcode);
2876                 err = -EINVAL;
2877                 goto out_unlock;
2878         }
2879
2880         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2881
2882         if (err) {
2883                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2884                                 __func__, opcode, idn, index, err);
2885                 goto out_unlock;
2886         }
2887
2888         *buf_len = be16_to_cpu(response->upiu_res.length);
2889
2890 out_unlock:
2891         hba->dev_cmd.query.descriptor = NULL;
2892         mutex_unlock(&hba->dev_cmd.lock);
2893 out:
2894         ufshcd_release(hba);
2895         return err;
2896 }
2897
2898 /**
2899  * ufshcd_query_descriptor_retry - API function for sending descriptor
2900  * requests
2901  * @hba: per-adapter instance
2902  * @opcode: attribute opcode
2903  * @idn: attribute idn to access
2904  * @index: index field
2905  * @selector: selector field
2906  * @desc_buf: the buffer that contains the descriptor
2907  * @buf_len: length parameter passed to the device
2908  *
2909  * Returns 0 for success, non-zero in case of failure.
2910  * The buf_len parameter will contain, on return, the length parameter
2911  * received on the response.
2912  */
2913 static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2914                                          enum query_opcode opcode,
2915                                          enum desc_idn idn, u8 index,
2916                                          u8 selector,
2917                                          u8 *desc_buf, int *buf_len)
2918 {
2919         int err;
2920         int retries;
2921
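        /*
         * Retry only transient failures; -EINVAL means bad arguments and
         * will not succeed on a retry.
         */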
2922         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2923                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2924                                                 selector, desc_buf, buf_len);
2925                 if (!err || err == -EINVAL)
2926                         break;
2927         }
2928
2929         return err;
2930 }
2931
2932 /**
2933  * ufshcd_read_desc_length - read the specified descriptor length from header
2934  * @hba: Pointer to adapter instance
2935  * @desc_id: descriptor idn value
2936  * @desc_index: descriptor index
2937  * @desc_length: pointer to variable to read the length of descriptor
2938  *
2939  * Return 0 in case of success, non-zero otherwise
2940  */
2941 static int ufshcd_read_desc_length(struct ufs_hba *hba,
2942         enum desc_idn desc_id,
2943         int desc_index,
2944         int *desc_length)
2945 {
2946         int ret;
2947         u8 header[QUERY_DESC_HDR_SIZE];
2948         int header_len = QUERY_DESC_HDR_SIZE;
2949
2950         if (desc_id >= QUERY_DESC_IDN_MAX)
2951                 return -EINVAL;
2952
2953         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2954                                         desc_id, desc_index, 0, header,
2955                                         &header_len);
2956
2957         if (ret) {
2958                 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2959                         __func__, desc_id);
2960                 return ret;
2961         } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2962                 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2963                         __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
2964                         desc_id);
2965                 ret = -EINVAL;
2966         }
2967
2968         *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
2969         return ret;
2970
2971 }
2972
2973 /**
2974  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
2975  * @hba: Pointer to adapter instance
2976  * @desc_id: descriptor idn value
2977  * @desc_len: mapped desc length (out)
2978  *
2979  * Return 0 in case of success, non-zero otherwise
2980  */
2981 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
2982         enum desc_idn desc_id, int *desc_len)
2983 {
2984         switch (desc_id) {
2985         case QUERY_DESC_IDN_DEVICE:
2986                 *desc_len = hba->desc_size.dev_desc;
2987                 break;
2988         case QUERY_DESC_IDN_POWER:
2989                 *desc_len = hba->desc_size.pwr_desc;
2990                 break;
2991         case QUERY_DESC_IDN_GEOMETRY:
2992                 *desc_len = hba->desc_size.geom_desc;
2993                 break;
2994         case QUERY_DESC_IDN_CONFIGURATION:
2995                 *desc_len = hba->desc_size.conf_desc;
2996                 break;
2997         case QUERY_DESC_IDN_UNIT:
2998                 *desc_len = hba->desc_size.unit_desc;
2999                 break;
3000         case QUERY_DESC_IDN_INTERCONNECT:
3001                 *desc_len = hba->desc_size.interc_desc;
3002                 break;
3003         case QUERY_DESC_IDN_STRING:
3004                 *desc_len = QUERY_DESC_MAX_SIZE;
3005                 break;
3006         case QUERY_DESC_IDN_RFU_0:
3007         case QUERY_DESC_IDN_RFU_1:
3008                 *desc_len = 0;
3009                 break;
3010         default:
3011                 *desc_len = 0;
3012                 return -EINVAL;
3013         }
3014         return 0;
3015 }
3016 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3017
3018 /**
3019  * ufshcd_read_desc_param - read the specified descriptor parameter
3020  * @hba: Pointer to adapter instance
3021  * @desc_id: descriptor idn value
3022  * @desc_index: descriptor index
3023  * @param_offset: offset of the parameter to read
3024  * @param_read_buf: pointer to buffer where parameter would be read
3025  * @param_size: sizeof(param_read_buf)
3026  *
3027  * Return 0 in case of success, non-zero otherwise
3028  */
3029 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3030                                   enum desc_idn desc_id,
3031                                   int desc_index,
3032                                   u8 param_offset,
3033                                   u8 *param_read_buf,
3034                                   u8 param_size)
3035 {
3036         int ret;
3037         u8 *desc_buf;
3038         int buff_len;
3039         bool is_kmalloc = true;
3040
3041         /* Safety check */
3042         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3043                 return -EINVAL;
3044
3045         /* Get the max length of descriptor from structure filled up at probe
3046          * time.
3047          */
3048         ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3049
3050         /* Sanity checks */
3051         if (ret || !buff_len) {
3052                 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3053                         __func__);
3054                 return ret;
3055         }
3056
3057         /* Check whether we need temp memory */
3058         if (param_offset != 0 || param_size < buff_len) {
3059                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3060                 if (!desc_buf)
3061                         return -ENOMEM;
3062         } else {
3063                 desc_buf = param_read_buf;
3064                 is_kmalloc = false;
3065         }
3066
3067         /* Request for full descriptor */
3068         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3069                                         desc_id, desc_index, 0,
3070                                         desc_buf, &buff_len);
3071
3072         if (ret) {
3073                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3074                         __func__, desc_id, desc_index, param_offset, ret);
3075                 goto out;
3076         }
3077
3078         /* Sanity check */
3079         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3080                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3081                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3082                 ret = -EINVAL;
3083                 goto out;
3084         }
3085
3086         /* Check that we will not copy more data than is available */
3087         if (is_kmalloc && param_size > buff_len)
3088                 param_size = buff_len;
3089
3090         if (is_kmalloc)
3091                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3092 out:
3093         if (is_kmalloc)
3094                 kfree(desc_buf);
3095         return ret;
3096 }
3097
3098 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3099                                    enum desc_idn desc_id,
3100                                    int desc_index,
3101                                    u8 *buf,
3102                                    u32 size)
3103 {
3104         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3105 }
3106
3107 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3108                                          u8 *buf,
3109                                          u32 size)
3110 {
3111         return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3112 }
3113
3114 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3115 {
3116         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3117 }
3118
3119 /**
3120  * ufshcd_read_string_desc - read string descriptor
3121  * @hba: pointer to adapter instance
3122  * @desc_index: descriptor index
3123  * @buf: pointer to buffer where descriptor would be read
3124  * @size: size of buf
3125  * @ascii: if true convert from unicode to ascii characters
3126  *
3127  * Return 0 in case of success, non-zero otherwise
3128  */
3129 #define ASCII_STD true
3130 static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3131                                    u8 *buf, u32 size, bool ascii)
3132 {
3133         int err = 0;
3134
3135         err = ufshcd_read_desc(hba,
3136                                 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3137
3138         if (err) {
3139                 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3140                         __func__, QUERY_REQ_RETRIES, err);
3141                 goto out;
3142         }
3143
3144         if (ascii) {
3145                 int desc_len;
3146                 int ascii_len;
3147                 int i;
3148                 char *buff_ascii;
3149
3150                 desc_len = buf[0];
3151                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3152                 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3153                 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3154                         dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3155                                         __func__);
3156                         err = -ENOMEM;
3157                         goto out;
3158                 }
3159
3160                 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3161                 if (!buff_ascii) {
3162                         err = -ENOMEM;
3163                         goto out;
3164                 }
3165
3166                 /*
3167                  * The descriptor contains a string in UTF-16 format;
3168                  * convert it to UTF-8 so it can be displayed.
3169                  */
3170                 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3171                                 desc_len - QUERY_DESC_HDR_SIZE,
3172                                 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3173
3174                 /* replace non-printable or non-ASCII characters with spaces */
3175                 for (i = 0; i < ascii_len; i++)
3176                         ufshcd_remove_non_printable(&buff_ascii[i]);
3177
3178                 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3179                                 size - QUERY_DESC_HDR_SIZE);
3180                 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3181                 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3182                 kfree(buff_ascii);
3183         }
3184 out:
3185         return err;
3186 }
3187
3188 /**
3189  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3190  * @hba: Pointer to adapter instance
3191  * @lun: lun id
3192  * @param_offset: offset of the parameter to read
3193  * @param_read_buf: pointer to buffer where parameter would be read
3194  * @param_size: sizeof(param_read_buf)
3195  *
3196  * Return 0 in case of success, non-zero otherwise
3197  */
3198 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3199                                               int lun,
3200                                               enum unit_desc_param param_offset,
3201                                               u8 *param_read_buf,
3202                                               u32 param_size)
3203 {
3204         /*
3205          * Unit descriptors are only available for general purpose LUs (LUN id
3206          * from 0 to 7) and RPMB Well known LU.
3207          */
3208         if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
3209                 return -EOPNOTSUPP;
3210
3211         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3212                                       param_offset, param_read_buf, param_size);
3213 }
3214
3215 /**
3216  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3217  * @hba: per adapter instance
3218  *
3219  * 1. Allocate DMA memory for Command Descriptor array
3220  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3221  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3222  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3223  *      (UTMRDL)
3224  * 4. Allocate memory for local reference block (lrb).
3225  *
3226  * Returns 0 for success, non-zero in case of failure
3227  */
3228 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3229 {
3230         size_t utmrdl_size, utrdl_size, ucdl_size;
3231
3232         /* Allocate memory for UTP command descriptors */
3233         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3234         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3235                                                   ucdl_size,
3236                                                   &hba->ucdl_dma_addr,
3237                                                   GFP_KERNEL);
3238
3239         /*
3240          * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3241          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3242          * if it is aligned to PAGE_SIZE, then it will also be
3243          * aligned to 128 bytes.
3244          */
3245         if (!hba->ucdl_base_addr ||
3246             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3247                 dev_err(hba->dev,
3248                         "Command Descriptor Memory allocation failed\n");
3249                 goto out;
3250         }
3251
3252         /*
3253          * Allocate memory for UTP Transfer descriptors
3254          * UFSHCI requires 1024 byte alignment of UTRD
3255          */
3256         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3257         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3258                                                    utrdl_size,
3259                                                    &hba->utrdl_dma_addr,
3260                                                    GFP_KERNEL);
3261         if (!hba->utrdl_base_addr ||
3262             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3263                 dev_err(hba->dev,
3264                         "Transfer Descriptor Memory allocation failed\n");
3265                 goto out;
3266         }
3267
3268         /*
3269          * Allocate memory for UTP Task Management descriptors
3270          * UFSHCI requires 1024 byte alignment of UTMRD
3271          */
3272         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3273         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3274                                                     utmrdl_size,
3275                                                     &hba->utmrdl_dma_addr,
3276                                                     GFP_KERNEL);
3277         if (!hba->utmrdl_base_addr ||
3278             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3279                 dev_err(hba->dev,
3280                 "Task Management Descriptor Memory allocation failed\n");
3281                 goto out;
3282         }
3283
3284         /* Allocate memory for local reference block */
3285         hba->lrb = devm_kzalloc(hba->dev,
3286                                 hba->nutrs * sizeof(struct ufshcd_lrb),
3287                                 GFP_KERNEL);
3288         if (!hba->lrb) {
3289                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3290                 goto out;
3291         }
3292         return 0;
3293 out:
3294         return -ENOMEM;
3295 }
3296
3297 /**
3298  * ufshcd_host_memory_configure - configure local reference block with
3299  *                              memory offsets
3300  * @hba: per adapter instance
3301  *
3302  * Configure Host memory space
3303  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3304  * address.
3305  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3306  * and PRDT offset.
3307  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3308  * into local reference block.
3309  */
3310 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3311 {
3312         struct utp_transfer_cmd_desc *cmd_descp;
3313         struct utp_transfer_req_desc *utrdlp;
3314         dma_addr_t cmd_desc_dma_addr;
3315         dma_addr_t cmd_desc_element_addr;
3316         u16 response_offset;
3317         u16 prdt_offset;
3318         int cmd_desc_size;
3319         int i;
3320
3321         utrdlp = hba->utrdl_base_addr;
3322         cmd_descp = hba->ucdl_base_addr;
3323
3324         response_offset =
3325                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3326         prdt_offset =
3327                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3328
3329         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3330         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3331
3332         for (i = 0; i < hba->nutrs; i++) {
3333                 /* Configure UTRD with command descriptor base address */
3334                 cmd_desc_element_addr =
3335                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3336                 utrdlp[i].command_desc_base_addr_lo =
3337                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3338                 utrdlp[i].command_desc_base_addr_hi =
3339                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3340
3341                 /* Response UPIU and PRDT offsets are in double words, unless the byte granularity quirk is set */
3342                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3343                         utrdlp[i].response_upiu_offset =
3344                                 cpu_to_le16(response_offset);
3345                         utrdlp[i].prd_table_offset =
3346                                 cpu_to_le16(prdt_offset);
3347                         utrdlp[i].response_upiu_length =
3348                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3349                 } else {
3350                         utrdlp[i].response_upiu_offset =
3351                                 cpu_to_le16((response_offset >> 2));
3352                         utrdlp[i].prd_table_offset =
3353                                 cpu_to_le16((prdt_offset >> 2));
3354                         utrdlp[i].response_upiu_length =
3355                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3356                 }
3357
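                /*
                 * Save this slot's UTRD/UCD kernel and DMA addresses in the
                 * local reference block.
                 */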
3358                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3359                 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3360                                 (i * sizeof(struct utp_transfer_req_desc));
3361                 hba->lrb[i].ucd_req_ptr =
3362                         (struct utp_upiu_req *)(cmd_descp + i);
3363                 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3364                 hba->lrb[i].ucd_rsp_ptr =
3365                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3366                 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3367                                 response_offset;
3368                 hba->lrb[i].ucd_prdt_ptr =
3369                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3370                 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3371                                 prdt_offset;
3372         }
3373 }
3374
3375 /**
3376  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3377  * @hba: per adapter instance
3378  *
3379  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3380  * in order to initialize the Unipro link startup procedure.
3381  * Once the Unipro links are up, the device connected to the controller
3382  * is detected.
3383  *
3384  * Returns 0 on success, non-zero value on failure
3385  */
3386 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3387 {
3388         struct uic_command uic_cmd = {0};
3389         int ret;
3390
3391         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3392
3393         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3394         if (ret)
3395                 dev_dbg(hba->dev,
3396                         "dme-link-startup: error code %d\n", ret);
3397         return ret;
3398 }
3399
3400 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3401 {
3402         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3403         unsigned long min_sleep_time_us;
3404
3405         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3406                 return;
3407
3408         /*
3409          * last_dme_cmd_tstamp will be 0 only for 1st call to
3410          * this function
3411          */
3412         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3413                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3414         } else {
3415                 unsigned long delta =
3416                         (unsigned long) ktime_to_us(
3417                                 ktime_sub(ktime_get(),
3418                                 hba->last_dme_cmd_tstamp));
3419
3420                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3421                         min_sleep_time_us =
3422                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3423                 else
3424                         return; /* no more delay required */
3425         }
3426
3427         /* allow sleep for extra 50us if needed */
3428         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3429 }
3430
3431 /**
3432  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3433  * @hba: per adapter instance
3434  * @attr_sel: uic command argument1
3435  * @attr_set: attribute set type as uic command argument2
3436  * @mib_val: setting value as uic command argument3
3437  * @peer: indicate whether peer or local
3438  *
3439  * Returns 0 on success, non-zero value on failure
3440  */
3441 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3442                         u8 attr_set, u32 mib_val, u8 peer)
3443 {
3444         struct uic_command uic_cmd = {0};
3445         static const char *const action[] = {
3446                 "dme-set",
3447                 "dme-peer-set"
3448         };
3449         const char *set = action[!!peer];
3450         int ret;
3451         int retries = UFS_UIC_COMMAND_RETRIES;
3452
3453         uic_cmd.command = peer ?
3454                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3455         uic_cmd.argument1 = attr_sel;
3456         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3457         uic_cmd.argument3 = mib_val;
3458
3459         do {
3460                 /* for peer attributes we retry upon failure */
3461                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3462                 if (ret)
3463                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3464                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3465         } while (ret && peer && --retries);
3466
3467         if (ret)
3468                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3469                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3470                         UFS_UIC_COMMAND_RETRIES - retries);
3471
3472         return ret;
3473 }
3474 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3475
3476 /**
3477  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3478  * @hba: per adapter instance
3479  * @attr_sel: uic command argument1
3480  * @mib_val: the value of the attribute as returned by the UIC command
3481  * @peer: indicate whether peer or local
3482  *
3483  * Returns 0 on success, non-zero value on failure
3484  */
3485 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3486                         u32 *mib_val, u8 peer)
3487 {
3488         struct uic_command uic_cmd = {0};
3489         static const char *const action[] = {
3490                 "dme-get",
3491                 "dme-peer-get"
3492         };
3493         const char *get = action[!!peer];
3494         int ret;
3495         int retries = UFS_UIC_COMMAND_RETRIES;
3496         struct ufs_pa_layer_attr orig_pwr_info;
3497         struct ufs_pa_layer_attr temp_pwr_info;
3498         bool pwr_mode_change = false;
3499
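        /*
         * Hosts with UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE access peer
         * attributes with the link in an AUTO power mode: temporarily switch
         * FAST/SLOW to FASTAUTO/SLOWAUTO and restore the original mode below.
         */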
3500         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3501                 orig_pwr_info = hba->pwr_info;
3502                 temp_pwr_info = orig_pwr_info;
3503
3504                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3505                     orig_pwr_info.pwr_rx == FAST_MODE) {
3506                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3507                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3508                         pwr_mode_change = true;
3509                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3510                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3511                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3512                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3513                         pwr_mode_change = true;
3514                 }
3515                 if (pwr_mode_change) {
3516                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3517                         if (ret)
3518                                 goto out;
3519                 }
3520         }
3521
3522         uic_cmd.command = peer ?
3523                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3524         uic_cmd.argument1 = attr_sel;
3525
3526         do {
3527                 /* for peer attributes we retry upon failure */
3528                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3529                 if (ret)
3530                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3531                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3532         } while (ret && peer && --retries);
3533
3534         if (ret)
3535                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3536                         get, UIC_GET_ATTR_ID(attr_sel),
3537                         UFS_UIC_COMMAND_RETRIES - retries);
3538
3539         if (mib_val && !ret)
3540                 *mib_val = uic_cmd.argument3;
3541
3542         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3543             && pwr_mode_change)
3544                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3545 out:
3546         return ret;
3547 }
3548 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3549
3550 /**
3551  * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
3552  * state and wait for it to take effect.
3553  *
3554  * @hba: per adapter instance
3555  * @cmd: UIC command to execute
3556  *
3557  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3558  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3559  * and device UniPro link, and hence their final completion is indicated by
3560  * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3561  * addition to normal UIC command completion Status (UCCS). This function only
3562  * returns after the relevant status bits indicate the completion.
3563  *
3564  * Returns 0 on success, non-zero value on failure
3565  */
3566 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3567 {
3568         struct completion uic_async_done;
3569         unsigned long flags;
3570         u8 status;
3571         int ret;
3572         bool reenable_intr = false;
3573
3574         mutex_lock(&hba->uic_cmd_mutex);
3575         init_completion(&uic_async_done);
3576         ufshcd_add_delay_before_dme_cmd(hba);
3577
3578         spin_lock_irqsave(hba->host->host_lock, flags);
3579         hba->uic_async_done = &uic_async_done;
3580         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3581                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3582                 /*
3583                  * Make sure UIC command completion interrupt is disabled before
3584                  * issuing UIC command.
3585                  */
3586                 wmb();
3587                 reenable_intr = true;
3588         }
3589         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3590         spin_unlock_irqrestore(hba->host->host_lock, flags);
3591         if (ret) {
3592                 dev_err(hba->dev,
3593                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3594                         cmd->command, cmd->argument3, ret);
3595                 goto out;
3596         }
3597
3598         if (!wait_for_completion_timeout(hba->uic_async_done,
3599                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3600                 dev_err(hba->dev,
3601                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3602                         cmd->command, cmd->argument3);
3603                 ret = -ETIMEDOUT;
3604                 goto out;
3605         }
3606
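        /* Check the power mode change request status (UPMCRS) reported by the host */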
3607         status = ufshcd_get_upmcrs(hba);
3608         if (status != PWR_LOCAL) {
3609                 dev_err(hba->dev,
3610                         "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
3611                         cmd->command, status);
3612                 ret = (status != PWR_OK) ? status : -1;
3613         }
3614 out:
3615         if (ret) {
3616                 ufshcd_print_host_state(hba);
3617                 ufshcd_print_pwr_info(hba);
3618                 ufshcd_print_host_regs(hba);
3619         }
3620
3621         spin_lock_irqsave(hba->host->host_lock, flags);
3622         hba->active_uic_cmd = NULL;
3623         hba->uic_async_done = NULL;
3624         if (reenable_intr)
3625                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3626         spin_unlock_irqrestore(hba->host->host_lock, flags);
3627         mutex_unlock(&hba->uic_cmd_mutex);
3628
3629         return ret;
3630 }
3631
3632 /**
3633  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3634  *                              using DME_SET primitives.
3635  * @hba: per adapter instance
3636  * @mode: power mode value
3637  *
3638  * Returns 0 on success, non-zero value on failure
3639  */
3640 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3641 {
3642         struct uic_command uic_cmd = {0};
3643         int ret;
3644
3645         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3646                 ret = ufshcd_dme_set(hba,
3647                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3648                 if (ret) {
3649                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3650                                                 __func__, ret);
3651                         goto out;
3652                 }
3653         }
3654
3655         uic_cmd.command = UIC_CMD_DME_SET;
3656         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3657         uic_cmd.argument3 = mode;
3658         ufshcd_hold(hba, false);
3659         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3660         ufshcd_release(hba);
3661
3662 out:
3663         return ret;
3664 }
3665
3666 static int ufshcd_link_recovery(struct ufs_hba *hba)
3667 {
3668         int ret;
3669         unsigned long flags;
3670
3671         spin_lock_irqsave(hba->host->host_lock, flags);
3672         hba->ufshcd_state = UFSHCD_STATE_RESET;
3673         ufshcd_set_eh_in_progress(hba);
3674         spin_unlock_irqrestore(hba->host->host_lock, flags);
3675
3676         ret = ufshcd_host_reset_and_restore(hba);
3677
3678         spin_lock_irqsave(hba->host->host_lock, flags);
3679         if (ret)
3680                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3681         ufshcd_clear_eh_in_progress(hba);
3682         spin_unlock_irqrestore(hba->host->host_lock, flags);
3683
3684         if (ret)
3685                 dev_err(hba->dev, "%s: link recovery failed, err %d",
3686                         __func__, ret);
3687
3688         return ret;
3689 }
3690
3691 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3692 {
3693         int ret;
3694         struct uic_command uic_cmd = {0};
3695         ktime_t start = ktime_get();
3696
3697         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3698
3699         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3700         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3701         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3702                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3703
3704         if (ret) {
3705                 int err;
3706
3707                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3708                         __func__, ret);
3709
3710                 /*
3711                  * If link recovery fails then return error code returned from
3712                  * ufshcd_link_recovery().
3713                  * If link recovery succeeds then return -EAGAIN so that
3714                  * the hibern8 enter can be retried.
3715                  */
3716                 err = ufshcd_link_recovery(hba);
3717                 if (err) {
3718                         dev_err(hba->dev, "%s: link recovery failed", __func__);
3719                         ret = err;
3720                 } else {
3721                         ret = -EAGAIN;
3722                 }
3723         } else
3724                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3725                                                                 POST_CHANGE);
3726
3727         return ret;
3728 }
3729
3730 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3731 {
3732         int ret = 0, retries;
3733
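        /*
         * __ufshcd_uic_hibern8_enter() returns -EAGAIN when link recovery
         * succeeded, in which case the enter attempt is retried.
         */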
3734         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3735                 ret = __ufshcd_uic_hibern8_enter(hba);
3736                 if (!ret)
3737                         goto out;
3738         }
3739 out:
3740         return ret;
3741 }
3742
3743 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3744 {
3745         struct uic_command uic_cmd = {0};
3746         int ret;
3747         ktime_t start = ktime_get();
3748
3749         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3750
3751         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3752         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3753         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3754                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3755
3756         if (ret) {
3757                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3758                         __func__, ret);
3759                 ret = ufshcd_link_recovery(hba);
3760         } else {
3761                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3762                                                                 POST_CHANGE);
3763                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3764                 hba->ufs_stats.hibern8_exit_cnt++;
3765         }
3766
3767         return ret;
3768 }
3769
3770 /**
3771  * ufshcd_init_pwr_info - setting the POR (power on reset)
3772  * values in hba power info
3773  * @hba: per-adapter instance
3774  */
3775 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3776 {
3777         hba->pwr_info.gear_rx = UFS_PWM_G1;
3778         hba->pwr_info.gear_tx = UFS_PWM_G1;
3779         hba->pwr_info.lane_rx = 1;
3780         hba->pwr_info.lane_tx = 1;
3781         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3782         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3783         hba->pwr_info.hs_rate = 0;
3784 }
3785
3786 /**
3787  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3788  * @hba: per-adapter instance
3789  */
3790 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3791 {
3792         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3793
3794         if (hba->max_pwr_info.is_valid)
3795                 return 0;
3796
3797         pwr_info->pwr_tx = FAST_MODE;
3798         pwr_info->pwr_rx = FAST_MODE;
3799         pwr_info->hs_rate = PA_HS_MODE_B;
3800
3801         /* Get the connected lane count */
3802         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3803                         &pwr_info->lane_rx);
3804         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3805                         &pwr_info->lane_tx);
3806
3807         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3808                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3809                                 __func__,
3810                                 pwr_info->lane_rx,
3811                                 pwr_info->lane_tx);
3812                 return -EINVAL;
3813         }
3814
3815         /*
3816          * First, get the maximum gears of HS speed.
3817          * If a zero value is read, it means there is no HSGEAR capability.
3818          * Then, get the maximum gears of PWM speed.
3819          */
3820         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3821         if (!pwr_info->gear_rx) {
3822                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3823                                 &pwr_info->gear_rx);
3824                 if (!pwr_info->gear_rx) {
3825                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3826                                 __func__, pwr_info->gear_rx);
3827                         return -EINVAL;
3828                 }
3829                 pwr_info->pwr_rx = SLOW_MODE;
3830         }
3831
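        /*
         * The host TX gear is bounded by the peer's (device's) maximum RX
         * gear, hence the peer query below.
         */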
3832         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3833                         &pwr_info->gear_tx);
3834         if (!pwr_info->gear_tx) {
3835                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3836                                 &pwr_info->gear_tx);
3837                 if (!pwr_info->gear_tx) {
3838                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3839                                 __func__, pwr_info->gear_tx);
3840                         return -EINVAL;
3841                 }
3842                 pwr_info->pwr_tx = SLOW_MODE;
3843         }
3844
3845         hba->max_pwr_info.is_valid = true;
3846         return 0;
3847 }
3848
3849 static int ufshcd_change_power_mode(struct ufs_hba *hba,
3850                              struct ufs_pa_layer_attr *pwr_mode)
3851 {
3852         int ret;
3853
3854         /* if already configured to the requested pwr_mode */
3855         if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3856             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3857             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3858             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3859             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3860             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3861             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3862                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3863                 return 0;
3864         }
3865
3866         /*
3867          * Configure attributes for power mode change with below.
3868          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
3869          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
3870          * - PA_HSSERIES
3871          */
3872         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3873         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3874                         pwr_mode->lane_rx);
3875         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3876                         pwr_mode->pwr_rx == FAST_MODE)
3877                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
3878         else
3879                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
3880
3881         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
3882         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
3883                         pwr_mode->lane_tx);
3884         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
3885                         pwr_mode->pwr_tx == FAST_MODE)
3886                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
3887         else
3888                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
3889
3890         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3891             pwr_mode->pwr_tx == FASTAUTO_MODE ||
3892             pwr_mode->pwr_rx == FAST_MODE ||
3893             pwr_mode->pwr_tx == FAST_MODE)
3894                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
3895                                                 pwr_mode->hs_rate);
3896
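        /* PA_PWRMODE: RX mode in the upper nibble, TX mode in the lower nibble */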
3897         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
3898                         | pwr_mode->pwr_tx);
3899
3900         if (ret) {
3901                 dev_err(hba->dev,
3902                         "%s: power mode change failed %d\n", __func__, ret);
3903         } else {
3904                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
3905                                                                 pwr_mode);
3906
3907                 memcpy(&hba->pwr_info, pwr_mode,
3908                         sizeof(struct ufs_pa_layer_attr));
3909         }
3910
3911         return ret;
3912 }
3913
3914 /**
3915  * ufshcd_config_pwr_mode - configure a new power mode
3916  * @hba: per-adapter instance
3917  * @desired_pwr_mode: desired power configuration
3918  */
3919 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3920                 struct ufs_pa_layer_attr *desired_pwr_mode)
3921 {
3922         struct ufs_pa_layer_attr final_params = { 0 };
3923         int ret;
3924
3925         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3926                                         desired_pwr_mode, &final_params);
3927
3928         if (ret)
3929                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3930
3931         ret = ufshcd_change_power_mode(hba, &final_params);
3932         if (!ret)
3933                 ufshcd_print_pwr_info(hba);
3934
3935         return ret;
3936 }
3937
3938 /**
3939  * ufshcd_complete_dev_init() - checks device readiness
3940  * @hba: per-adapter instance
3941  *
3942  * Set fDeviceInit flag and poll until device toggles it.
3943  */
3944 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3945 {
3946         int i;
3947         int err;
3948         bool flag_res = 1;
3949
3950         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3951                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
3952         if (err) {
3953                 dev_err(hba->dev,
3954                         "%s setting fDeviceInit flag failed with error %d\n",
3955                         __func__, err);
3956                 goto out;
3957         }
3958
3959         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
3960         for (i = 0; i < 1000 && !err && flag_res; i++)
3961                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3962                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3963
3964         if (err)
3965                 dev_err(hba->dev,
3966                         "%s reading fDeviceInit flag failed with error %d\n",
3967                         __func__, err);
3968         else if (flag_res)
3969                 dev_err(hba->dev,
3970                         "%s fDeviceInit was not cleared by the device\n",
3971                         __func__);
3972
3973 out:
3974         return err;
3975 }
3976
3977 /**
3978  * ufshcd_make_hba_operational - Make UFS controller operational
3979  * @hba: per adapter instance
3980  *
3981  * To bring UFS host controller to operational state,
3982  * 1. Enable required interrupts
3983  * 2. Configure interrupt aggregation
3984  * 3. Program UTRL and UTMRL base address
3985  * 4. Configure run-stop-registers
3986  *
3987  * Returns 0 on success, non-zero value on failure
3988  */
3989 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3990 {
3991         int err = 0;
3992         u32 reg;
3993
3994         /* Enable required interrupts */
3995         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3996
3997         /* Configure interrupt aggregation */
3998         if (ufshcd_is_intr_aggr_allowed(hba))
3999                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4000         else
4001                 ufshcd_disable_intr_aggr(hba);
4002
4003         /* Configure UTRL and UTMRL base address registers */
4004         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4005                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4006         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4007                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4008         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4009                         REG_UTP_TASK_REQ_LIST_BASE_L);
4010         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4011                         REG_UTP_TASK_REQ_LIST_BASE_H);
4012
4013         /*
4014          * Make sure base address and interrupt setup are updated before
4015          * enabling the run/stop registers below.
4016          */
4017         wmb();
4018
4019         /*
4020          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4021          */
4022         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4023         if (!(ufshcd_get_lists_status(reg))) {
4024                 ufshcd_enable_run_stop_reg(hba);
4025         } else {
4026                 dev_err(hba->dev,
4027                         "Host controller not ready to process requests");
4028                 err = -EIO;
4029                 goto out;
4030         }
4031
4032 out:
4033         return err;
4034 }
4035
4036 /**
4037  * ufshcd_hba_stop - Send controller to reset state
4038  * @hba: per adapter instance
4039  * @can_sleep: perform sleep or just spin
4040  */
4041 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4042 {
4043         int err;
4044
4045         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4046         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4047                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4048                                         10, 1, can_sleep);
4049         if (err)
4050                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4051 }
4052
4053 /**
4054  * ufshcd_hba_enable - initialize the controller
4055  * @hba: per adapter instance
4056  *
4057  * The controller resets itself and controller firmware initialization
4058  * sequence kicks off. When controller is ready it will set
4059  * the Host Controller Enable bit to 1.
4060  *
4061  * Returns 0 on success, non-zero value on failure
4062  */
4063 static int ufshcd_hba_enable(struct ufs_hba *hba)
4064 {
4065         int retry;
4066
4067         /*
4068          * msleep of 1 and 5 used in this function might result in msleep(20),
4069          * but it was necessary to send the UFS FPGA to reset mode during
4070          * development and testing of this driver. msleep can be changed to
4071          * mdelay and retry count can be reduced based on the controller.
4072          */
4073         if (!ufshcd_is_hba_active(hba))
4074                 /* change controller state to "reset state" */
4075                 ufshcd_hba_stop(hba, true);
4076
4077         /* UniPro link is disabled at this point */
4078         ufshcd_set_link_off(hba);
4079
4080         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4081
4082         /* start controller initialization sequence */
4083         ufshcd_hba_start(hba);
4084
4085         /*
4086          * To initialize a UFS host controller HCE bit must be set to 1.
4087          * During initialization the HCE bit value changes from 1->0->1.
4088          * When the host controller completes initialization sequence
4089          * it sets the value of HCE bit to 1. The same HCE bit is read back
4090          * to check if the controller has completed initialization sequence.
4091          * So without this delay, the HCE = 1 value set by the previous
4092          * instruction might be read back.
4093          * This delay can be changed based on the controller.
4094          */
4095         msleep(1);
4096
4097         /* wait for the host controller to complete initialization */
4098         retry = 10;
4099         while (ufshcd_is_hba_active(hba)) {
4100                 if (retry) {
4101                         retry--;
4102                 } else {
4103                         dev_err(hba->dev,
4104                                 "Controller enable failed\n");
4105                         return -EIO;
4106                 }
4107                 msleep(5);
4108         }
4109
4110         /* enable UIC related interrupts */
4111         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4112
4113         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4114
4115         return 0;
4116 }
4117
4118 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4119 {
4120         int tx_lanes, i, err = 0;
4121
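        /*
         * Clear TX_LCC_ENABLE on each connected TX data lane, either locally
         * or on the peer depending on @peer.
         */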
4122         if (!peer)
4123                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4124                                &tx_lanes);
4125         else
4126                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4127                                     &tx_lanes);
4128         for (i = 0; i < tx_lanes; i++) {
4129                 if (!peer)
4130                         err = ufshcd_dme_set(hba,
4131                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4132                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4133                                         0);
4134                 else
4135                         err = ufshcd_dme_peer_set(hba,
4136                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4137                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4138                                         0);
4139                 if (err) {
4140                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4141                                 __func__, peer, i, err);
4142                         break;
4143                 }
4144         }
4145
4146         return err;
4147 }
4148
4149 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4150 {
4151         return ufshcd_disable_tx_lcc(hba, true);
4152 }
4153
4154 /**
4155  * ufshcd_link_startup - Initialize unipro link startup
4156  * @hba: per adapter instance
4157  *
4158  * Returns 0 for success, non-zero in case of failure
4159  */
4160 static int ufshcd_link_startup(struct ufs_hba *hba)
4161 {
4162         int ret;
4163         int retries = DME_LINKSTARTUP_RETRIES;
4164         bool link_startup_again = false;
4165
4166         /*
4167          * If the UFS device isn't active then we will have to issue link startup
4168          * twice to make sure the device state moves to active.
4169          */
4170         if (!ufshcd_is_ufs_dev_active(hba))
4171                 link_startup_again = true;
4172
4173 link_startup:
4174         do {
4175                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4176
4177                 ret = ufshcd_dme_link_startup(hba);
4178
4179                 /* check if device is detected by inter-connect layer */
4180                 if (!ret && !ufshcd_is_device_present(hba)) {
4181                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4182                         ret = -ENXIO;
4183                         goto out;
4184                 }
4185
4186                 /*
4187                  * DME link lost indication is only received when link is up,
4188                  * but we can't be sure if the link is up until link startup
4189                  * succeeds. So reset the local Uni-Pro and try again.
4190                  */
4191                 if (ret && ufshcd_hba_enable(hba))
4192                         goto out;
4193         } while (ret && retries--);
4194
4195         if (ret)
4196                 /* failed to get the link up... retire */
4197                 goto out;
4198
4199         if (link_startup_again) {
4200                 link_startup_again = false;
4201                 retries = DME_LINKSTARTUP_RETRIES;
4202                 goto link_startup;
4203         }
4204
4205         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4206         ufshcd_init_pwr_info(hba);
4207         ufshcd_print_pwr_info(hba);
4208
4209         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4210                 ret = ufshcd_disable_device_tx_lcc(hba);
4211                 if (ret)
4212                         goto out;
4213         }
4214
4215         /* Include any host controller configuration via UIC commands */
4216         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4217         if (ret)
4218                 goto out;
4219
4220         ret = ufshcd_make_hba_operational(hba);
4221 out:
4222         if (ret) {
4223                 dev_err(hba->dev, "link startup failed %d\n", ret);
4224                 ufshcd_print_host_state(hba);
4225                 ufshcd_print_pwr_info(hba);
4226                 ufshcd_print_host_regs(hba);
4227         }
4228         return ret;
4229 }
4230
4231 /**
4232  * ufshcd_verify_dev_init() - Verify device initialization
4233  * @hba: per-adapter instance
4234  *
4235  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4236  * device Transport Protocol (UTP) layer is ready after a reset.
4237  * If the UTP layer at the device side is not initialized, it may
4238  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4239  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4240  */
4241 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4242 {
4243         int err = 0;
4244         int retries;
4245
4246         ufshcd_hold(hba, false);
4247         mutex_lock(&hba->dev_cmd.lock);
4248         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4249                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4250                                                NOP_OUT_TIMEOUT);
4251
4252                 if (!err || err == -ETIMEDOUT)
4253                         break;
4254
4255                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4256         }
4257         mutex_unlock(&hba->dev_cmd.lock);
4258         ufshcd_release(hba);
4259
4260         if (err)
4261                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4262         return err;
4263 }
4264
4265 /**
4266  * ufshcd_set_queue_depth - set lun queue depth
4267  * @sdev: pointer to SCSI device
4268  *
4269  * Read bLUQueueDepth value and activate scsi tagged command
4270  * queueing. For WLUN, queue depth is set to 1. For best-effort
4271  * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4272  * value that the host can queue.
4273  */
4274 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4275 {
4276         int ret = 0;
4277         u8 lun_qdepth;
4278         struct ufs_hba *hba;
4279
4280         hba = shost_priv(sdev->host);
4281
4282         lun_qdepth = hba->nutrs;
4283         ret = ufshcd_read_unit_desc_param(hba,
4284                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4285                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4286                                           &lun_qdepth,
4287                                           sizeof(lun_qdepth));
4288
4289         /* Some WLUNs don't support the unit descriptor */
4290         if (ret == -EOPNOTSUPP)
4291                 lun_qdepth = 1;
4292         else if (!lun_qdepth)
4293                 /* eventually, we can figure out the real queue depth */
4294                 lun_qdepth = hba->nutrs;
4295         else
4296                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4297
4298         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4299                         __func__, lun_qdepth);
4300         scsi_change_queue_depth(sdev, lun_qdepth);
4301 }
4302
4303 /*
4304  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4305  * @hba: per-adapter instance
4306  * @lun: UFS device lun id
4307  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4308  *
4309  * Returns 0 in case of success and the write protect status is returned in the
4310  * @b_lu_write_protect parameter.
4311  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4312  * Returns -EINVAL in case of invalid parameters passed to this function.
4313  */
4314 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4315                             u8 lun,
4316                             u8 *b_lu_write_protect)
4317 {
4318         int ret;
4319
4320         if (!b_lu_write_protect)
4321                 ret = -EINVAL;
4322         /*
4323          * According to the UFS device spec, the RPMB LU can't be write
4324          * protected, so skip reading the bLUWriteProtect parameter for
4325          * it. For other W-LUs, the UNIT DESCRIPTOR is not available.
4326          */
4327         else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4328                 ret = -ENOTSUPP;
4329         else
4330                 ret = ufshcd_read_unit_desc_param(hba,
4331                                           lun,
4332                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4333                                           b_lu_write_protect,
4334                                           sizeof(*b_lu_write_protect));
4335         return ret;
4336 }
4337
4338 /**
4339  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4340  * status
4341  * @hba: per-adapter instance
4342  * @sdev: pointer to SCSI device
4343  *
4344  */
4345 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4346                                                     struct scsi_device *sdev)
4347 {
4348         if (hba->dev_info.f_power_on_wp_en &&
4349             !hba->dev_info.is_lu_power_on_wp) {
4350                 u8 b_lu_write_protect;
4351
4352                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4353                                       &b_lu_write_protect) &&
4354                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4355                         hba->dev_info.is_lu_power_on_wp = true;
4356         }
4357 }
4358
4359 /**
4360  * ufshcd_slave_alloc - handle initial SCSI device configurations
4361  * @sdev: pointer to SCSI device
4362  *
4363  * Returns zero (success)
4364  */
4365 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4366 {
4367         struct ufs_hba *hba;
4368
4369         hba = shost_priv(sdev->host);
4370
4371         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4372         sdev->use_10_for_ms = 1;
4373
4374         /* allow SCSI layer to restart the device in case of errors */
4375         sdev->allow_restart = 1;
4376
4377         /* REPORT SUPPORTED OPERATION CODES is not supported */
4378         sdev->no_report_opcodes = 1;
4379
4380         /* WRITE_SAME command is not supported */
4381         sdev->no_write_same = 1;
4382
4383         ufshcd_set_queue_depth(sdev);
4384
4385         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4386
4387         return 0;
4388 }
4389
4390 /**
4391  * ufshcd_change_queue_depth - change queue depth
4392  * @sdev: pointer to SCSI device
4393  * @depth: required depth to set
4394  *
4395  * Change queue depth and make sure the max. limits are not crossed.
4396  */
4397 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4398 {
4399         struct ufs_hba *hba = shost_priv(sdev->host);
4400
4401         if (depth > hba->nutrs)
4402                 depth = hba->nutrs;
4403         return scsi_change_queue_depth(sdev, depth);
4404 }
4405
4406 /**
4407  * ufshcd_slave_configure - adjust SCSI device configurations
4408  * @sdev: pointer to SCSI device
4409  */
4410 static int ufshcd_slave_configure(struct scsi_device *sdev)
4411 {
4412         struct request_queue *q = sdev->request_queue;
4413
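             /*
              * PRDT entries describe data in multiples of PRDT_DATA_BYTE_COUNT_PAD
              * bytes, so have the block layer pad buffer lengths accordingly and
              * cap each segment at what a single PRDT entry can address.
              */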
4414         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4415         blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4416
4417         return 0;
4418 }
4419
4420 /**
4421  * ufshcd_slave_destroy - remove SCSI device configurations
4422  * @sdev: pointer to SCSI device
4423  */
4424 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4425 {
4426         struct ufs_hba *hba;
4427
4428         hba = shost_priv(sdev->host);
4429         /* Drop the reference as it won't be needed anymore */
4430         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4431                 unsigned long flags;
4432
4433                 spin_lock_irqsave(hba->host->host_lock, flags);
4434                 hba->sdev_ufs_device = NULL;
4435                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4436         }
4437 }
4438
4439 /**
4440  * ufshcd_task_req_compl - handle task management request completion
4441  * @hba: per adapter instance
4442  * @index: index of the completed request
4443  * @resp: task management service response
4444  *
4445  * Returns non-zero value on error, zero on success
4446  */
4447 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4448 {
4449         struct utp_task_req_desc *task_req_descp;
4450         struct utp_upiu_task_rsp *task_rsp_upiup;
4451         unsigned long flags;
4452         int ocs_value;
4453         int task_result;
4454
4455         spin_lock_irqsave(hba->host->host_lock, flags);
4456
4457         /* Clear completed tasks from outstanding_tasks */
4458         __clear_bit(index, &hba->outstanding_tasks);
4459
4460         task_req_descp = hba->utmrdl_base_addr;
4461         ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4462
4463         if (ocs_value == OCS_SUCCESS) {
4464                 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4465                                 task_req_descp[index].task_rsp_upiu;
4466                 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4467                 task_result = task_result & MASK_TM_SERVICE_RESP;
4468                 if (resp)
4469                         *resp = (u8)task_result;
4470         } else {
4471                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4472                                 __func__, ocs_value);
4473         }
4474         spin_unlock_irqrestore(hba->host->host_lock, flags);
4475
4476         return ocs_value;
4477 }
4478
4479 /**
4480  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4481  * @lrb: pointer to local reference block of completed command
4482  * @scsi_status: SCSI command status
4483  *
4484  * Returns value based on SCSI command status
4485  */
4486 static inline int
4487 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4488 {
4489         int result = 0;
4490
4491         switch (scsi_status) {
4492         case SAM_STAT_CHECK_CONDITION:
4493                 ufshcd_copy_sense_data(lrbp);
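                     /* fallthrough: CHECK CONDITION is still reported as DID_OK with the SCSI status attached */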
4494         case SAM_STAT_GOOD:
4495                 result |= DID_OK << 16 |
4496                           COMMAND_COMPLETE << 8 |
4497                           scsi_status;
4498                 break;
4499         case SAM_STAT_TASK_SET_FULL:
4500         case SAM_STAT_BUSY:
4501         case SAM_STAT_TASK_ABORTED:
4502                 ufshcd_copy_sense_data(lrbp);
4503                 result |= scsi_status;
4504                 break;
4505         default:
4506                 result |= DID_ERROR << 16;
4507                 break;
4508         } /* end of switch */
4509
4510         return result;
4511 }
4512
4513 /**
4514  * ufshcd_transfer_rsp_status - Get overall status of the response
4515  * @hba: per adapter instance
4516  * @lrb: pointer to local reference block of completed command
4517  *
4518  * Returns result of the command to notify SCSI midlayer
4519  */
4520 static inline int
4521 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4522 {
4523         int result = 0;
4524         int scsi_status;
4525         int ocs;
4526
4527         /* overall command status of utrd */
4528         ocs = ufshcd_get_tr_ocs(lrbp);
4529
4530         switch (ocs) {
4531         case OCS_SUCCESS:
4532                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4533                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4534                 switch (result) {
4535                 case UPIU_TRANSACTION_RESPONSE:
4536                         /*
4537                          * get the response UPIU result to extract
4538                          * the SCSI command status
4539                          */
4540                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4541
4542                         /*
4543                          * get the result based on SCSI status response
4544                          * to notify the SCSI midlayer of the command status
4545                          */
4546                         scsi_status = result & MASK_SCSI_STATUS;
4547                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4548
4549                          * Currently we only support BKOPS exception
4550                          * events, hence we can ignore BKOPS exception events
4551                          * during power management callbacks. A BKOPS exception
4552                          * event is not expected to be raised in the runtime
4553                          * suspend callback as it allows urgent BKOPS.
4554                          * During system suspend we forcefully disable BKOPS
4555                          * anyway, and if urgent BKOPS is needed it will be
4556                          * enabled on system resume. A long term solution
4557                          * could be to abort the system suspend if the UFS
4558                          * device needs urgent BKOPS.
4559                          * UFS device needs urgent BKOPs.
4560                          */
4561                         if (!hba->pm_op_in_progress &&
4562                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4563                                 schedule_work(&hba->eeh_work);
4564                         break;
4565                 case UPIU_TRANSACTION_REJECT_UPIU:
4566                         /* TODO: handle Reject UPIU Response */
4567                         result = DID_ERROR << 16;
4568                         dev_err(hba->dev,
4569                                 "Reject UPIU not fully implemented\n");
4570                         break;
4571                 default:
4572                         dev_err(hba->dev,
4573                                 "Unexpected request response code = %x\n",
4574                                 result);
4575                         result = DID_ERROR << 16;
4576                         break;
4577                 }
4578                 break;
4579         case OCS_ABORTED:
4580                 result |= DID_ABORT << 16;
4581                 break;
4582         case OCS_INVALID_COMMAND_STATUS:
4583                 result |= DID_REQUEUE << 16;
4584                 break;
4585         case OCS_INVALID_CMD_TABLE_ATTR:
4586         case OCS_INVALID_PRDT_ATTR:
4587         case OCS_MISMATCH_DATA_BUF_SIZE:
4588         case OCS_MISMATCH_RESP_UPIU_SIZE:
4589         case OCS_PEER_COMM_FAILURE:
4590         case OCS_FATAL_ERROR:
4591         default:
4592                 result |= DID_ERROR << 16;
4593                 dev_err(hba->dev,
4594                                 "OCS error from controller = %x for tag %d\n",
4595                                 ocs, lrbp->task_tag);
4596                 ufshcd_print_host_regs(hba);
4597                 ufshcd_print_host_state(hba);
4598                 break;
4599         } /* end of switch */
4600
4601         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4602                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4603         return result;
4604 }
4605
4606 /**
4607  * ufshcd_uic_cmd_compl - handle completion of uic command
4608  * @hba: per adapter instance
4609  * @intr_status: interrupt status generated by the controller
4610  */
4611 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4612 {
4613         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4614                 hba->active_uic_cmd->argument2 |=
4615                         ufshcd_get_uic_cmd_result(hba);
4616                 hba->active_uic_cmd->argument3 =
4617                         ufshcd_get_dme_attr_val(hba);
4618                 complete(&hba->active_uic_cmd->done);
4619         }
4620
4621         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4622                 complete(hba->uic_async_done);
4623 }
4624
4625 /**
4626  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4627  * @hba: per adapter instance
4628  * @completed_reqs: requests to complete
4629  */
4630 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4631                                         unsigned long completed_reqs)
4632 {
4633         struct ufshcd_lrb *lrbp;
4634         struct scsi_cmnd *cmd;
4635         int result;
4636         int index;
4637
4638         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4639                 lrbp = &hba->lrb[index];
4640                 cmd = lrbp->cmd;
4641                 if (cmd) {
4642                         ufshcd_add_command_trace(hba, index, "complete");
4643                         result = ufshcd_transfer_rsp_status(hba, lrbp);
4644                         scsi_dma_unmap(cmd);
4645                         cmd->result = result;
4646                         /* Mark completed command as NULL in LRB */
4647                         lrbp->cmd = NULL;
4648                         clear_bit_unlock(index, &hba->lrb_in_use);
4649                         /* Do not touch lrbp after scsi done */
4650                         cmd->scsi_done(cmd);
4651                         __ufshcd_release(hba);
4652                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4653                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4654                         if (hba->dev_cmd.complete) {
4655                                 ufshcd_add_command_trace(hba, index,
4656                                                 "dev_complete");
4657                                 complete(hba->dev_cmd.complete);
4658                         }
4659                 }
4660                 if (ufshcd_is_clkscaling_supported(hba))
4661                         hba->clk_scaling.active_reqs--;
4662         }
4663
4664         /* clear corresponding bits of completed commands */
4665         hba->outstanding_reqs ^= completed_reqs;
4666
4667         ufshcd_clk_scaling_update_busy(hba);
4668
4669         /* we might have free'd some tags above */
4670         wake_up(&hba->dev_cmd.tag_wq);
4671 }
4672
4673 /**
4674  * ufshcd_transfer_req_compl - handle SCSI and query command completion
4675  * @hba: per adapter instance
4676  */
4677 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4678 {
4679         unsigned long completed_reqs;
4680         u32 tr_doorbell;
4681
4682         /* Resetting interrupt aggregation counters first and reading the
4683          * DOOR_BELL afterward allows us to handle all the completed requests.
4684          * In order to prevent starvation of other interrupts the DB is read
4685          * only once after reset. The downside of this solution is the
4686          * possibility of a false interrupt if the device completes another
4687          * request after resetting aggregation and before reading the DB.
4688          */
4689         if (ufshcd_is_intr_aggr_allowed(hba))
4690                 ufshcd_reset_intr_aggr(hba);
4691
4692         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
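             /*
              * Requests whose bit is set in outstanding_reqs but already cleared
              * in the doorbell have been completed by the controller.
              */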
4693         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4694
4695         __ufshcd_transfer_req_compl(hba, completed_reqs);
4696 }
4697
4698 /**
4699  * ufshcd_disable_ee - disable exception event
4700  * @hba: per-adapter instance
4701  * @mask: exception event to disable
4702  *
4703  * Disables exception event in the device so that the EVENT_ALERT
4704  * bit is not set.
4705  *
4706  * Returns zero on success, non-zero error value on failure.
4707  */
4708 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4709 {
4710         int err = 0;
4711         u32 val;
4712
4713         if (!(hba->ee_ctrl_mask & mask))
4714                 goto out;
4715
4716         val = hba->ee_ctrl_mask & ~mask;
4717         val &= MASK_EE_STATUS;
4718         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4719                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4720         if (!err)
4721                 hba->ee_ctrl_mask &= ~mask;
4722 out:
4723         return err;
4724 }
4725
4726 /**
4727  * ufshcd_enable_ee - enable exception event
4728  * @hba: per-adapter instance
4729  * @mask: exception event to enable
4730  *
4731  * Enable corresponding exception event in the device to allow
4732  * device to alert host in critical scenarios.
4733  *
4734  * Returns zero on success, non-zero error value on failure.
4735  */
4736 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4737 {
4738         int err = 0;
4739         u32 val;
4740
4741         if (hba->ee_ctrl_mask & mask)
4742                 goto out;
4743
4744         val = hba->ee_ctrl_mask | mask;
4745         val &= MASK_EE_STATUS;
4746         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4747                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4748         if (!err)
4749                 hba->ee_ctrl_mask |= mask;
4750 out:
4751         return err;
4752 }
4753
4754 /**
4755  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4756  * @hba: per-adapter instance
4757  *
4758  * Allow the device to manage background operations on its own. Enabling
4759  * this might lead to inconsistent latencies during normal data transfers
4760  * as the device is free to decide for itself how and when to handle
4761  * background operations.
4762  *
4763  * Returns zero on success, non-zero on failure.
4764  */
4765 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4766 {
4767         int err = 0;
4768
4769         if (hba->auto_bkops_enabled)
4770                 goto out;
4771
4772         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4773                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
4774         if (err) {
4775                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4776                                 __func__, err);
4777                 goto out;
4778         }
4779
4780         hba->auto_bkops_enabled = true;
4781         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4782
4783         /* No need of URGENT_BKOPS exception from the device */
4784         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4785         if (err)
4786                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4787                                 __func__, err);
4788 out:
4789         return err;
4790 }
4791
4792 /**
4793  * ufshcd_disable_auto_bkops - block device in doing background operations
4794  * @hba: per-adapter instance
4795  *
4796  * Disabling background operations improves command response latency but
4797  * has the drawback of the device moving into a critical state where it is
4798  * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4799  * host is idle so that BKOPS are managed effectively without any negative
4800  * impact.
4801  *
4802  * Returns zero on success, non-zero on failure.
4803  */
4804 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4805 {
4806         int err = 0;
4807
4808         if (!hba->auto_bkops_enabled)
4809                 goto out;
4810
4811         /*
4812          * If host assisted BKOPs is to be enabled, make sure
4813          * urgent bkops exception is allowed.
4814          */
4815         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4816         if (err) {
4817                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4818                                 __func__, err);
4819                 goto out;
4820         }
4821
4822         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4823                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
4824         if (err) {
4825                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4826                                 __func__, err);
4827                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4828                 goto out;
4829         }
4830
4831         hba->auto_bkops_enabled = false;
4832         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
4833         hba->is_urgent_bkops_lvl_checked = false;
4834 out:
4835         return err;
4836 }
4837
4838 /**
4839  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
4840  * @hba: per adapter instance
4841  *
4842  * After a device reset the device may toggle the BKOPS_EN flag back
4843  * to its default value, so the s/w tracking variables have to be updated
4844  * as well. This function changes the auto-bkops state based on
4845  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
4846  */
4847 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
4848 {
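             /*
              * Force the cached state to the opposite value first so that the
              * enable/disable helper below does not short-circuit and actually
              * issues the flag query to the device.
              */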
4849         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
4850                 hba->auto_bkops_enabled = false;
4851                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4852                 ufshcd_enable_auto_bkops(hba);
4853         } else {
4854                 hba->auto_bkops_enabled = true;
4855                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
4856                 ufshcd_disable_auto_bkops(hba);
4857         }
4858         hba->is_urgent_bkops_lvl_checked = false;
4859 }
4860
4861 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
4862 {
4863         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4864                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
4865 }
4866
4867 /**
4868  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
4869  * @hba: per-adapter instance
4870  * @status: bkops_status value
4871  *
4872  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
4873  * flag in the device to permit background operations if the device
4874  * bkops_status is greater than or equal to the "status" argument passed to
4875  * this function; disable it otherwise.
4876  *
4877  * Returns 0 for success, non-zero in case of failure.
4878  *
4879  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
4880  * to know whether auto bkops is enabled or disabled after this function
4881  * returns control to it.
4882  */
4883 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
4884                              enum bkops_status status)
4885 {
4886         int err;
4887         u32 curr_status = 0;
4888
4889         err = ufshcd_get_bkops_status(hba, &curr_status);
4890         if (err) {
4891                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4892                                 __func__, err);
4893                 goto out;
4894         } else if (curr_status > BKOPS_STATUS_MAX) {
4895                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
4896                                 __func__, curr_status);
4897                 err = -EINVAL;
4898                 goto out;
4899         }
4900
4901         if (curr_status >= status)
4902                 err = ufshcd_enable_auto_bkops(hba);
4903         else
4904                 err = ufshcd_disable_auto_bkops(hba);
4905 out:
4906         return err;
4907 }
4908
4909 /**
4910  * ufshcd_urgent_bkops - handle urgent bkops exception event
4911  * @hba: per-adapter instance
4912  *
4913  * Enable fBackgroundOpsEn flag in the device to permit background
4914  * operations.
4915  *
4916  * Returns 0 if BKOPS is enabled, 1 if BKOPS is not enabled, and a
4917  * negative error value for any other failure.
4918  */
4919 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
4920 {
4921         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
4922 }
4923
4924 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
4925 {
4926         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4927                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
4928 }
4929
4930 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
4931 {
4932         int err;
4933         u32 curr_status = 0;
4934
4935         if (hba->is_urgent_bkops_lvl_checked)
4936                 goto enable_auto_bkops;
4937
4938         err = ufshcd_get_bkops_status(hba, &curr_status);
4939         if (err) {
4940                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4941                                 __func__, err);
4942                 goto out;
4943         }
4944
4945         /*
4946          * We are seeing that some devices raise the urgent bkops
4947          * exception event even when the BKOPS status doesn't indicate
4948          * performance impacted or critical. Handle these devices by
4949          * determining their urgent bkops status at runtime.
4950          */
4951         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
4952                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
4953                                 __func__, curr_status);
4954                 /* update the current status as the urgent bkops level */
4955                 hba->urgent_bkops_lvl = curr_status;
4956                 hba->is_urgent_bkops_lvl_checked = true;
4957         }
4958
4959 enable_auto_bkops:
4960         err = ufshcd_enable_auto_bkops(hba);
4961 out:
4962         if (err < 0)
4963                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
4964                                 __func__, err);
4965 }
4966
4967 /**
4968  * ufshcd_exception_event_handler - handle exceptions raised by device
4969  * @work: pointer to work data
4970  *
4971  * Read bExceptionEventStatus attribute from the device and handle the
4972  * exception event accordingly.
4973  */
4974 static void ufshcd_exception_event_handler(struct work_struct *work)
4975 {
4976         struct ufs_hba *hba;
4977         int err;
4978         u32 status = 0;
4979         hba = container_of(work, struct ufs_hba, eeh_work);
4980
4981         pm_runtime_get_sync(hba->dev);
4982         scsi_block_requests(hba->host);
4983         err = ufshcd_get_ee_status(hba, &status);
4984         if (err) {
4985                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
4986                                 __func__, err);
4987                 goto out;
4988         }
4989
4990         status &= hba->ee_ctrl_mask;
4991
4992         if (status & MASK_EE_URGENT_BKOPS)
4993                 ufshcd_bkops_exception_event_handler(hba);
4994
4995 out:
4996         scsi_unblock_requests(hba->host);
4997         pm_runtime_put_sync(hba->dev);
4998         return;
4999 }
5000
5001 /* Complete requests that have door-bell cleared */
5002 static void ufshcd_complete_requests(struct ufs_hba *hba)
5003 {
5004         ufshcd_transfer_req_compl(hba);
5005         ufshcd_tmc_handler(hba);
5006 }
5007
5008 /**
5009  * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
5010  *                              is required to recover from the DL NAC errors.
5011  * @hba: per-adapter instance
5012  *
5013  * Returns true if error handling is required, false otherwise
5014  */
5015 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5016 {
5017         unsigned long flags;
5018         bool err_handling = true;
5019
5020         spin_lock_irqsave(hba->host->host_lock, flags);
5021         /*
5022          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5023          * device fatal error and/or DL NAC & REPLAY timeout errors.
5024          */
5025         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5026                 goto out;
5027
5028         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5029             ((hba->saved_err & UIC_ERROR) &&
5030              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5031                 goto out;
5032
5033         if ((hba->saved_err & UIC_ERROR) &&
5034             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5035                 int err;
5036                 /*
5037                  * wait for 50ms to see if we can get any other errors or not.
5038                  */
5039                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5040                 msleep(50);
5041                 spin_lock_irqsave(hba->host->host_lock, flags);
5042
5043                 /*
5044                  * Now check whether we have got any other severe errors
5045                  * besides the DL NAC error.
5046                  */
5047                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5048                     ((hba->saved_err & UIC_ERROR) &&
5049                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5050                         goto out;
5051
5052                 /*
5053                  * As DL NAC is the only error received so far, send out a NOP
5054                  * command to confirm whether the link is still active.
5055                  *   - If we don't get any response then do error recovery.
5056                  *   - If we get a response then clear the DL NAC error bit.
5057                  */
5058
5059                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5060                 err = ufshcd_verify_dev_init(hba);
5061                 spin_lock_irqsave(hba->host->host_lock, flags);
5062
5063                 if (err)
5064                         goto out;
5065
5066                 /* Link seems to be alive hence ignore the DL NAC errors */
5067                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5068                         hba->saved_err &= ~UIC_ERROR;
5069                 /* clear NAC error */
5070                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5071                 if (!hba->saved_uic_err) {
5072                         err_handling = false;
5073                         goto out;
5074                 }
5075         }
5076 out:
5077         spin_unlock_irqrestore(hba->host->host_lock, flags);
5078         return err_handling;
5079 }
5080
5081 /**
5082  * ufshcd_err_handler - handle UFS errors that require s/w attention
5083  * @work: pointer to work structure
5084  */
5085 static void ufshcd_err_handler(struct work_struct *work)
5086 {
5087         struct ufs_hba *hba;
5088         unsigned long flags;
5089         u32 err_xfer = 0;
5090         u32 err_tm = 0;
5091         int err = 0;
5092         int tag;
5093         bool needs_reset = false;
5094
5095         hba = container_of(work, struct ufs_hba, eh_work);
5096
5097         pm_runtime_get_sync(hba->dev);
5098         ufshcd_hold(hba, false);
5099
5100         spin_lock_irqsave(hba->host->host_lock, flags);
5101         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5102                 goto out;
5103
5104         hba->ufshcd_state = UFSHCD_STATE_RESET;
5105         ufshcd_set_eh_in_progress(hba);
5106
5107         /* Complete requests that have door-bell cleared by h/w */
5108         ufshcd_complete_requests(hba);
5109
5110         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5111                 bool ret;
5112
5113                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5114                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5115                 ret = ufshcd_quirk_dl_nac_errors(hba);
5116                 spin_lock_irqsave(hba->host->host_lock, flags);
5117                 if (!ret)
5118                         goto skip_err_handling;
5119         }
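             /* Fatal interrupt errors and these UIC data link errors are handled with a full host reset */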
5120         if ((hba->saved_err & INT_FATAL_ERRORS) ||
5121             ((hba->saved_err & UIC_ERROR) &&
5122             (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5123                                    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5124                                    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5125                 needs_reset = true;
5126
5127         /*
5128          * if host reset is required then skip clearing the pending
5129          * transfers forcefully because they will get cleared during
5130          * host reset and restore
5131          */
5132         if (needs_reset)
5133                 goto skip_pending_xfer_clear;
5134
5135         /* release lock as clear command might sleep */
5136         spin_unlock_irqrestore(hba->host->host_lock, flags);
5137         /* Clear pending transfer requests */
5138         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5139                 if (ufshcd_clear_cmd(hba, tag)) {
5140                         err_xfer = true;
5141                         goto lock_skip_pending_xfer_clear;
5142                 }
5143         }
5144
5145         /* Clear pending task management requests */
5146         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5147                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5148                         err_tm = true;
5149                         goto lock_skip_pending_xfer_clear;
5150                 }
5151         }
5152
5153 lock_skip_pending_xfer_clear:
5154         spin_lock_irqsave(hba->host->host_lock, flags);
5155
5156         /* Complete the requests that are cleared by s/w */
5157         ufshcd_complete_requests(hba);
5158
5159         if (err_xfer || err_tm)
5160                 needs_reset = true;
5161
5162 skip_pending_xfer_clear:
5163         /* Fatal errors need reset */
5164         if (needs_reset) {
5165                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5166
5167                 /*
5168                  * ufshcd_reset_and_restore() does the link reinitialization
5169                  * which will need at least one empty doorbell slot to send the
5170                  * device management commands (NOP and query commands).
5171                  * If no slot is empty at this moment then forcefully free up
5172                  * the last slot.
5173                  */
5174                 if (hba->outstanding_reqs == max_doorbells)
5175                         __ufshcd_transfer_req_compl(hba,
5176                                                     (1UL << (hba->nutrs - 1)));
5177
5178                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5179                 err = ufshcd_reset_and_restore(hba);
5180                 spin_lock_irqsave(hba->host->host_lock, flags);
5181                 if (err) {
5182                         dev_err(hba->dev, "%s: reset and restore failed\n",
5183                                         __func__);
5184                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
5185                 }
5186                 /*
5187                  * Inform scsi mid-layer that we did reset and allow to handle
5188                  * Unit Attention properly.
5189                  */
5190                 scsi_report_bus_reset(hba->host, 0);
5191                 hba->saved_err = 0;
5192                 hba->saved_uic_err = 0;
5193         }
5194
5195 skip_err_handling:
5196         if (!needs_reset) {
5197                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5198                 if (hba->saved_err || hba->saved_uic_err)
5199                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5200                             __func__, hba->saved_err, hba->saved_uic_err);
5201         }
5202
5203         ufshcd_clear_eh_in_progress(hba);
5204
5205 out:
5206         spin_unlock_irqrestore(hba->host->host_lock, flags);
5207         scsi_unblock_requests(hba->host);
5208         ufshcd_release(hba);
5209         pm_runtime_put_sync(hba->dev);
5210 }
5211
5212 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5213                 u32 reg)
5214 {
5215         reg_hist->reg[reg_hist->pos] = reg;
5216         reg_hist->tstamp[reg_hist->pos] = ktime_get();
5217         reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5218 }
5219
5220 /**
5221  * ufshcd_update_uic_error - check and set fatal UIC error flags.
5222  * @hba: per-adapter instance
5223  */
5224 static void ufshcd_update_uic_error(struct ufs_hba *hba)
5225 {
5226         u32 reg;
5227
5228         /* PHY layer lane error */
5229         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5230         /* Ignore LINERESET indication, as this is not an error */
5231         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5232                         (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5233                 /*
5234                  * To know whether this error is fatal or not, DB timeout
5235                  * must be checked but this error is handled separately.
5236                  */
5237                 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5238                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5239         }
5240
5241         /* PA_INIT_ERROR is fatal and needs UIC reset */
5242         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5243         if (reg)
5244                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5245
5246         if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5247                 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5248         else if (hba->dev_quirks &
5249                    UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5250                 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5251                         hba->uic_error |=
5252                                 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5253                 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5254                         hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5255         }
5256
5257         /* UIC NL/TL/DME errors need a software retry */
5258         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5259         if (reg) {
5260                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5261                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5262         }
5263
5264         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5265         if (reg) {
5266                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5267                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5268         }
5269
5270         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5271         if (reg) {
5272                 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5273                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5274         }
5275
5276         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5277                         __func__, hba->uic_error);
5278 }
5279
5280 /**
5281  * ufshcd_check_errors - Check for errors that need s/w attention
5282  * @hba: per-adapter instance
5283  */
5284 static void ufshcd_check_errors(struct ufs_hba *hba)
5285 {
5286         bool queue_eh_work = false;
5287
5288         if (hba->errors & INT_FATAL_ERRORS)
5289                 queue_eh_work = true;
5290
5291         if (hba->errors & UIC_ERROR) {
5292                 hba->uic_error = 0;
5293                 ufshcd_update_uic_error(hba);
5294                 if (hba->uic_error)
5295                         queue_eh_work = true;
5296         }
5297
5298         if (queue_eh_work) {
5299                 /*
5300                  * Update the transfer error masks to sticky bits; do this
5301                  * irrespective of the current ufshcd_state.
5302                  */
5303                 hba->saved_err |= hba->errors;
5304                 hba->saved_uic_err |= hba->uic_error;
5305
5306                 /* handle fatal errors only when link is functional */
5307                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5308                         /* block commands from scsi mid-layer */
5309                         scsi_block_requests(hba->host);
5310
5311                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5312
5313                         /* dump controller state before resetting */
5314                         if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5315                                 bool pr_prdt = !!(hba->saved_err &
5316                                                 SYSTEM_BUS_FATAL_ERROR);
5317
5318                                 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5319                                         __func__, hba->saved_err,
5320                                         hba->saved_uic_err);
5321
5322                                 ufshcd_print_host_regs(hba);
5323                                 ufshcd_print_pwr_info(hba);
5324                                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5325                                 ufshcd_print_trs(hba, hba->outstanding_reqs,
5326                                                         pr_prdt);
5327                         }
5328                         schedule_work(&hba->eh_work);
5329                 }
5330         }
5331         /*
5332          * if (!queue_eh_work) -
5333          * Other errors are either non-fatal where host recovers
5334          * itself without s/w intervention or errors that will be
5335          * handled by the SCSI core layer.
5336          */
5337 }
5338
5339 /**
5340  * ufshcd_tmc_handler - handle task management function completion
5341  * @hba: per adapter instance
5342  */
5343 static void ufshcd_tmc_handler(struct ufs_hba *hba)
5344 {
5345         u32 tm_doorbell;
5346
5347         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
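             /* TM slots that are outstanding but no longer in the doorbell are done */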
5348         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5349         wake_up(&hba->tm_wq);
5350 }
5351
5352 /**
5353  * ufshcd_sl_intr - Interrupt service routine
5354  * @hba: per adapter instance
5355  * @intr_status: contains interrupts generated by the controller
5356  */
5357 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5358 {
5359         hba->errors = UFSHCD_ERROR_MASK & intr_status;
5360         if (hba->errors)
5361                 ufshcd_check_errors(hba);
5362
5363         if (intr_status & UFSHCD_UIC_MASK)
5364                 ufshcd_uic_cmd_compl(hba, intr_status);
5365
5366         if (intr_status & UTP_TASK_REQ_COMPL)
5367                 ufshcd_tmc_handler(hba);
5368
5369         if (intr_status & UTP_TRANSFER_REQ_COMPL)
5370                 ufshcd_transfer_req_compl(hba);
5371 }
5372
5373 /**
5374  * ufshcd_intr - Main interrupt service routine
5375  * @irq: irq number
5376  * @__hba: pointer to adapter instance
5377  *
5378  * Returns IRQ_HANDLED - If interrupt is valid
5379  *              IRQ_NONE - If invalid interrupt
5380  */
5381 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5382 {
5383         u32 intr_status, enabled_intr_status = 0;
5384         irqreturn_t retval = IRQ_NONE;
5385         struct ufs_hba *hba = __hba;
5386         int retries = hba->nutrs;
5387
5388         spin_lock(hba->host->host_lock);
5389         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5390
5391         /*
5392          * There could be a max of hba->nutrs reqs in flight and, in the worst
5393          * case, they may finish one by one after the interrupt status is
5394          * read. Make sure we handle them by checking the interrupt status
5395          * again in a loop until all of the reqs are processed before returning.
5396          */
5397         while (intr_status && retries--) {
5398                 enabled_intr_status =
5399                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5400                 if (intr_status)
5401                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5402                 if (enabled_intr_status) {
5403                         ufshcd_sl_intr(hba, enabled_intr_status);
5404                         retval = IRQ_HANDLED;
5405                 }
5406
5407                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5408         }
5409
5410         spin_unlock(hba->host->host_lock);
5411         return retval;
5412 }
5413
5414 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5415 {
5416         int err = 0;
5417         u32 mask = 1 << tag;
5418         unsigned long flags;
5419
5420         if (!test_bit(tag, &hba->outstanding_tasks))
5421                 goto out;
5422
5423         spin_lock_irqsave(hba->host->host_lock, flags);
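             /* Writing a 0 bit to the task request list CLEAR register clears that slot */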
5424         ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
5425         spin_unlock_irqrestore(hba->host->host_lock, flags);
5426
5427         /* poll for max. 1 sec for the door bell register to be cleared by h/w */
5428         err = ufshcd_wait_for_register(hba,
5429                         REG_UTP_TASK_REQ_DOOR_BELL,
5430                         mask, 0, 1000, 1000, true);
5431 out:
5432         return err;
5433 }
5434
5435 /**
5436  * ufshcd_issue_tm_cmd - issues task management commands to controller
5437  * @hba: per adapter instance
5438  * @lun_id: LUN ID to which TM command is sent
5439  * @task_id: task ID to which the TM command is applicable
5440  * @tm_function: task management function opcode
5441  * @tm_response: task management service response return value
5442  *
5443  * Returns non-zero value on error, zero on success.
5444  */
5445 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5446                 u8 tm_function, u8 *tm_response)
5447 {
5448         struct utp_task_req_desc *task_req_descp;
5449         struct utp_upiu_task_req *task_req_upiup;
5450         struct Scsi_Host *host;
5451         unsigned long flags;
5452         int free_slot;
5453         int err;
5454         int task_tag;
5455
5456         host = hba->host;
5457
5458         /*
5459          * Get free slot, sleep if slots are unavailable.
5460          * Even though we use wait_event() which sleeps indefinitely,
5461          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5462          */
5463         wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5464         ufshcd_hold(hba, false);
5465
5466         spin_lock_irqsave(host->host_lock, flags);
5467         task_req_descp = hba->utmrdl_base_addr;
5468         task_req_descp += free_slot;
5469
5470         /* Configure task request descriptor */
5471         task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5472         task_req_descp->header.dword_2 =
5473                         cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5474
5475         /* Configure task request UPIU */
5476         task_req_upiup =
5477                 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
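             /* Offset by nutrs so TM task tags never collide with transfer request tags */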
5478         task_tag = hba->nutrs + free_slot;
5479         task_req_upiup->header.dword_0 =
5480                 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5481                                               lun_id, task_tag);
5482         task_req_upiup->header.dword_1 =
5483                 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5484         /*
5485          * The host shall provide the same value for LUN field in the basic
5486          * header and for Input Parameter.
5487          */
5488         task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5489         task_req_upiup->input_param2 = cpu_to_be32(task_id);
5490
5491         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5492
5493         /* send command to the controller */
5494         __set_bit(free_slot, &hba->outstanding_tasks);
5495
5496         /* Make sure descriptors are ready before ringing the task doorbell */
5497         wmb();
5498
5499         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5500         /* Make sure that doorbell is committed immediately */
5501         wmb();
5502
5503         spin_unlock_irqrestore(host->host_lock, flags);
5504
5505         /* wait until the task management command is completed */
5506         err = wait_event_timeout(hba->tm_wq,
5507                         test_bit(free_slot, &hba->tm_condition),
5508                         msecs_to_jiffies(TM_CMD_TIMEOUT));
5509         if (!err) {
5510                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5511                                 __func__, tm_function);
5512                 if (ufshcd_clear_tm_cmd(hba, free_slot))
5513                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5514                                         __func__, free_slot);
5515                 err = -ETIMEDOUT;
5516         } else {
5517                 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5518         }
5519
5520         clear_bit(free_slot, &hba->tm_condition);
5521         ufshcd_put_tm_slot(hba, free_slot);
5522         wake_up(&hba->tm_tag_wq);
5523
5524         ufshcd_release(hba);
5525         return err;
5526 }
5527
5528 /**
5529  * ufshcd_eh_device_reset_handler - device reset handler registered to
5530  *                                    scsi layer.
5531  * @cmd: SCSI command pointer
5532  *
5533  * Returns SUCCESS/FAILED
5534  */
5535 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5536 {
5537         struct Scsi_Host *host;
5538         struct ufs_hba *hba;
5539         u32 pos;
5540         int err;
5541         u8 resp = 0xF, lun;
5542         unsigned long flags;
5543
5544         host = cmd->device->host;
5545         hba = shost_priv(host);
5546
5547         lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
5548         err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
5549         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5550                 if (!err)
5551                         err = resp;
5552                 goto out;
5553         }
5554
5555         /* clear the commands that were pending for corresponding LUN */
5556         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5557                 if (hba->lrb[pos].lun == lun) {
5558                         err = ufshcd_clear_cmd(hba, pos);
5559                         if (err)
5560                                 break;
5561                 }
5562         }
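             /* Retire the requests that were just cleared from the doorbell */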
5563         spin_lock_irqsave(host->host_lock, flags);
5564         ufshcd_transfer_req_compl(hba);
5565         spin_unlock_irqrestore(host->host_lock, flags);
5566
5567 out:
5568         hba->req_abort_count = 0;
5569         if (!err) {
5570                 err = SUCCESS;
5571         } else {
5572                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5573                 err = FAILED;
5574         }
5575         return err;
5576 }
5577
5578 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5579 {
5580         struct ufshcd_lrb *lrbp;
5581         int tag;
5582
5583         for_each_set_bit(tag, &bitmap, hba->nutrs) {
5584                 lrbp = &hba->lrb[tag];
5585                 lrbp->req_abort_skip = true;
5586         }
5587 }
5588
5589 /**
5590  * ufshcd_abort - abort a specific command
5591  * @cmd: SCSI command pointer
5592  *
5593  * Abort the pending command in the device by sending a UFS_ABORT_TASK task
5594  * management command, and in the host controller by clearing the door-bell
5595  * register. There can be a race between the controller sending the command to
5596  * the device and the abort being issued. To avoid that, first issue
5597  * UFS_QUERY_TASK to check whether the command was really issued, then abort it.
5598  *
5599  * Returns SUCCESS/FAILED
5600  */
5601 static int ufshcd_abort(struct scsi_cmnd *cmd)
5602 {
5603         struct Scsi_Host *host;
5604         struct ufs_hba *hba;
5605         unsigned long flags;
5606         unsigned int tag;
5607         int err = 0;
5608         int poll_cnt;
5609         u8 resp = 0xF;
5610         struct ufshcd_lrb *lrbp;
5611         u32 reg;
5612
5613         host = cmd->device->host;
5614         hba = shost_priv(host);
5615         tag = cmd->request->tag;
5616         lrbp = &hba->lrb[tag];
5617         if (!ufshcd_valid_tag(hba, tag)) {
5618                 dev_err(hba->dev,
5619                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5620                         __func__, tag, cmd, cmd->request);
5621                 BUG();
5622         }
5623
5624         /*
5625          * Task abort to the device W-LUN is illegal. When this command
5626          * fails, due to the spec violation, the next scsi err handling step
5627          * will be to send a LU reset which, again, is a spec violation.
5628          * To avoid these unnecessary/illegal steps we skip to the last error
5629          * handling stage: reset and restore.
5630          */
5631         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5632                 return ufshcd_eh_host_reset_handler(cmd);
5633
5634         ufshcd_hold(hba, false);
5635         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5636         /* If command is already aborted/completed, return SUCCESS */
5637         if (!(test_bit(tag, &hba->outstanding_reqs))) {
5638                 dev_err(hba->dev,
5639                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5640                         __func__, tag, hba->outstanding_reqs, reg);
5641                 goto out;
5642         }
5643
5644         if (!(reg & (1 << tag))) {
5645                 dev_err(hba->dev,
5646                 "%s: cmd was completed, but without a notifying intr, tag = %d",
5647                 __func__, tag);
5648         }
5649
5650         /* Print Transfer Request of aborted task */
5651         dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5652
5653         /*
5654          * Print detailed info about aborted request.
5655          * As more than one request might get aborted at the same time,
5656          * print full information only for the first aborted request in order
5657          * to reduce repeated printouts. For other aborted requests only print
5658          * basic details.
5659          */
5660         scsi_print_command(hba->lrb[tag].cmd);
5661         if (!hba->req_abort_count) {
5662                 ufshcd_print_host_regs(hba);
5663                 ufshcd_print_host_state(hba);
5664                 ufshcd_print_pwr_info(hba);
5665                 ufshcd_print_trs(hba, 1 << tag, true);
5666         } else {
5667                 ufshcd_print_trs(hba, 1 << tag, false);
5668         }
5669         hba->req_abort_count++;
5670
5671         /* Skip task abort in case previous aborts failed and report failure */
5672         if (lrbp->req_abort_skip) {
5673                 err = -EIO;
5674                 goto out;
5675         }
5676
5677         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5678                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5679                                 UFS_QUERY_TASK, &resp);
5680                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5681                         /* cmd pending in the device */
5682                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5683                                 __func__, tag);
5684                         break;
5685                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5686                         /*
5687                          * cmd not pending in the device, check if it is
5688                          * in transition.
5689                          */
5690                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5691                                 __func__, tag);
5692                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5693                         if (reg & (1 << tag)) {
5694                                 /* sleep for max. 200us to stabilize */
5695                                 usleep_range(100, 200);
5696                                 continue;
5697                         }
5698                         /* command completed already */
5699                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5700                                 __func__, tag);
5701                         goto cleanup;
5702                 } else {
5703                         dev_err(hba->dev,
5704                                 "%s: no response from device. tag = %d, err %d\n",
5705                                 __func__, tag, err);
5706                         if (!err)
5707                                 err = resp; /* service response error */
5708                         goto out;
5709                 }
5710         }
5711
5712         if (!poll_cnt) {
5713                 err = -EBUSY;
5714                 goto out;
5715         }
5716
5717         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5718                         UFS_ABORT_TASK, &resp);
5719         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5720                 if (!err) {
5721                         err = resp; /* service response error */
5722                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
5723                                 __func__, tag, err);
5724                 }
5725                 goto out;
5726         }
5727
5728         err = ufshcd_clear_cmd(hba, tag);
5729         if (err) {
5730                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5731                         __func__, tag, err);
5732                 goto out;
5733         }
5734
5735 cleanup:
5736         scsi_dma_unmap(cmd);
5737
5738         spin_lock_irqsave(host->host_lock, flags);
5739         ufshcd_outstanding_req_clear(hba, tag);
5740         hba->lrb[tag].cmd = NULL;
5741         spin_unlock_irqrestore(host->host_lock, flags);
5742
5743         clear_bit_unlock(tag, &hba->lrb_in_use);
5744         wake_up(&hba->dev_cmd.tag_wq);
5745
5746 out:
5747         if (!err) {
5748                 err = SUCCESS;
5749         } else {
5750                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5751                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
5752                 err = FAILED;
5753         }
5754
5755         /*
5756          * This ufshcd_release() corresponds to the original scsi cmd that got
5757          * aborted here (as we won't get any IRQ for it).
5758          */
5759         ufshcd_release(hba);
5760         return err;
5761 }
5762
5763 /**
5764  * ufshcd_host_reset_and_restore - reset and restore host controller
5765  * @hba: per-adapter instance
5766  *
5767  * Note that host controller reset may issue DME_RESET to
5768  * the local and remote (device) UniPro stacks and the attributes
5769  * are reset to their default state.
5770  *
5771  * Returns zero on success, non-zero on failure
5772  */
5773 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5774 {
5775         int err;
5776         unsigned long flags;
5777
5778         /*
5779          * Stop the host controller and complete the requests
5780          * cleared by h/w
5781          */
5782         spin_lock_irqsave(hba->host->host_lock, flags);
5783         ufshcd_hba_stop(hba, false);
5784         hba->silence_err_logs = true;
5785         ufshcd_complete_requests(hba);
5786         hba->silence_err_logs = false;
5787         spin_unlock_irqrestore(hba->host->host_lock, flags);
5788
5789         /* scale up clocks to max frequency before full reinitialization */
5790         ufshcd_scale_clks(hba, true);
5791
5792         err = ufshcd_hba_enable(hba);
5793         if (err)
5794                 goto out;
5795
5796         /* Establish the link again and restore the device */
5797         err = ufshcd_probe_hba(hba);
5798
5799         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
5800                 err = -EIO;
5801 out:
5802         if (err)
5803                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5804
5805         return err;
5806 }
5807
5808 /**
5809  * ufshcd_reset_and_restore - reset and re-initialize host/device
5810  * @hba: per-adapter instance
5811  *
5812  * Reset and recover device, host and re-establish link. This
5813  * is helpful to recover the communication in fatal error conditions.
5814  *
5815  * Returns zero on success, non-zero on failure
5816  */
5817 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
5818 {
5819         int err = 0;
5820         int retries = MAX_HOST_RESET_RETRIES;
5821
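        /* Retry the host reset and restore sequence up to MAX_HOST_RESET_RETRIES times */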
5822         do {
5823                 err = ufshcd_host_reset_and_restore(hba);
5824         } while (err && --retries);
5825
5826         return err;
5827 }
5828
5829 /**
5830  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
5831  * @cmd: SCSI command pointer
5832  *
5833  * Returns SUCCESS/FAILED
5834  */
5835 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
5836 {
5837         int err;
5838         unsigned long flags;
5839         struct ufs_hba *hba;
5840
5841         hba = shost_priv(cmd->device->host);
5842
5843         ufshcd_hold(hba, false);
5844         /*
5845          * Check if there is any race with fatal error handling.
5846          * If so, wait for it to complete. Even though fatal error
5847          * handling does reset and restore in some cases, don't assume
5848          * anything out of it. We are just avoiding race here.
5849          */
5850         do {
5851                 spin_lock_irqsave(hba->host->host_lock, flags);
5852                 if (!(work_pending(&hba->eh_work) ||
5853                             hba->ufshcd_state == UFSHCD_STATE_RESET ||
5854                             hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
5855                         break;
5856                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5857                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
5858                 flush_work(&hba->eh_work);
5859         } while (1);
5860
5861         hba->ufshcd_state = UFSHCD_STATE_RESET;
5862         ufshcd_set_eh_in_progress(hba);
5863         spin_unlock_irqrestore(hba->host->host_lock, flags);
5864
5865         err = ufshcd_reset_and_restore(hba);
5866
5867         spin_lock_irqsave(hba->host->host_lock, flags);
5868         if (!err) {
5869                 err = SUCCESS;
5870                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5871         } else {
5872                 err = FAILED;
5873                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5874         }
5875         ufshcd_clear_eh_in_progress(hba);
5876         spin_unlock_irqrestore(hba->host->host_lock, flags);
5877
5878         ufshcd_release(hba);
5879         return err;
5880 }
5881
5882 /**
5883  * ufshcd_get_max_icc_level - calculate the ICC level
5884  * @sup_curr_uA: max. current supported by the regulator
5885  * @start_scan: row at the desc table to start scan from
5886  * @buff: power descriptor buffer
5887  *
5888  * Returns calculated max ICC level for specific regulator
5889  */
5890 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
5891 {
5892         int i;
5893         int curr_uA;
5894         u16 data;
5895         u16 unit;
5896
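
        /*
         * Each 16-bit entry of the active ICC level table encodes a unit
         * field and a value field; the value is normalized to microamps
         * below (e.g. a value of 150 with unit UFSHCD_MILI_AMP becomes
         * 150 * 1000 = 150000 uA). Scan from the highest level downwards
         * and stop at the first level whose current fits within the
         * regulator's supported maximum.
         */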
5897         for (i = start_scan; i >= 0; i--) {
5898                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
5899                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
5900                                                 ATTR_ICC_LVL_UNIT_OFFSET;
5901                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
5902                 switch (unit) {
5903                 case UFSHCD_NANO_AMP:
5904                         curr_uA = curr_uA / 1000;
5905                         break;
5906                 case UFSHCD_MILI_AMP:
5907                         curr_uA = curr_uA * 1000;
5908                         break;
5909                 case UFSHCD_AMP:
5910                         curr_uA = curr_uA * 1000 * 1000;
5911                         break;
5912                 case UFSHCD_MICRO_AMP:
5913                 default:
5914                         break;
5915                 }
5916                 if (sup_curr_uA >= curr_uA)
5917                         break;
5918         }
5919         if (i < 0) {
5920                 i = 0;
5921                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
5922         }
5923
5924         return (u32)i;
5925 }
5926
5927 /**
5928  * ufshcd_find_max_sup_active_icc_level - find the max supported active ICC level
5929  * @hba: per-adapter instance
5930  * @desc_buf: power descriptor buffer to extract ICC levels from.
5931  * @len: length of desc_buf
5932  *
5933  * In case the regulators are not initialized, 0 is returned.
5934  * Returns the calculated ICC level.
5935  */
5936 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
5937                                                         u8 *desc_buf, int len)
5938 {
5939         u32 icc_level = 0;
5940
5941         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
5942                                                 !hba->vreg_info.vccq2) {
5943                 dev_err(hba->dev,
5944                         "%s: Regulator capability was not set, actvIccLevel=%d",
5945                                                         __func__, icc_level);
5946                 goto out;
5947         }
5948
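        /*
         * Scan the VCC levels first, then feed the result in as the starting
         * row for the VCCQ and VCCQ2 scans so that the returned level
         * satisfies all three rails.
         */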
5949         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
5950                 icc_level = ufshcd_get_max_icc_level(
5951                                 hba->vreg_info.vcc->max_uA,
5952                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
5953                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
5954
5955         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
5956                 icc_level = ufshcd_get_max_icc_level(
5957                                 hba->vreg_info.vccq->max_uA,
5958                                 icc_level,
5959                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
5960
5961         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
5962                 icc_level = ufshcd_get_max_icc_level(
5963                                 hba->vreg_info.vccq2->max_uA,
5964                                 icc_level,
5965                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
5966 out:
5967         return icc_level;
5968 }
5969
5970 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5971 {
5972         int ret;
5973         int buff_len = hba->desc_size.pwr_desc;
5974         u8 desc_buf[hba->desc_size.pwr_desc];
5975
5976         ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
5977         if (ret) {
5978                 dev_err(hba->dev,
5979                         "%s: Failed reading power descriptor.len = %d ret = %d",
5980                         __func__, buff_len, ret);
5981                 return;
5982         }
5983
5984         hba->init_prefetch_data.icc_level =
5985                         ufshcd_find_max_sup_active_icc_level(hba,
5986                         desc_buf, buff_len);
5987         dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
5988                         __func__, hba->init_prefetch_data.icc_level);
5989
5990         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5991                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
5992                 &hba->init_prefetch_data.icc_level);
5993
5994         if (ret)
5995                 dev_err(hba->dev,
5996                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
5997                         __func__, hba->init_prefetch_data.icc_level, ret);
5998
5999 }
6000
6001 /**
6002  * ufshcd_scsi_add_wlus - Adds required W-LUs
6003  * @hba: per-adapter instance
6004  *
6005  * UFS device specification requires the UFS devices to support 4 well known
6006  * logical units:
6007  *      "REPORT_LUNS" (address: 01h)
6008  *      "UFS Device" (address: 50h)
6009  *      "RPMB" (address: 44h)
6010  *      "BOOT" (address: 30h)
6011  * UFS device power management needs to be controlled by the "POWER CONDITION"
6012  * field of the SSU (START STOP UNIT) command. But this "power condition" field
6013  * takes effect only when it is sent to the "UFS device" well known logical
6014  * unit, hence we require a scsi_device instance to represent this logical unit
6015  * in order for the UFS host driver to send the SSU command for power management.
6016  *
6017  * We also require the scsi_device instance for the "RPMB" (Replay Protected
6018  * Memory Block) LU so a user space process can control this LU. User space may
6019  * also want to have access to the BOOT LU.
6020  *
6021  * This function adds scsi device instances for each of the well known LUs
6022  * (except the "REPORT LUNS" LU).
6023  *
6024  * Returns zero on success (all required W-LUs are added successfully),
6025  * non-zero error value on failure (if failed to add any of the required W-LU).
6026  */
6027 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6028 {
6029         int ret = 0;
6030         struct scsi_device *sdev_rpmb;
6031         struct scsi_device *sdev_boot;
6032
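        /*
         * The UFS device W-LU is remembered in hba->sdev_ufs_device so that
         * START STOP UNIT commands can be sent to it later; the BOOT and
         * RPMB W-LUs only need to be registered as scsi devices.
         */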
6033         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6034                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6035         if (IS_ERR(hba->sdev_ufs_device)) {
6036                 ret = PTR_ERR(hba->sdev_ufs_device);
6037                 hba->sdev_ufs_device = NULL;
6038                 goto out;
6039         }
6040         scsi_device_put(hba->sdev_ufs_device);
6041
6042         sdev_boot = __scsi_add_device(hba->host, 0, 0,
6043                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6044         if (IS_ERR(sdev_boot)) {
6045                 ret = PTR_ERR(sdev_boot);
6046                 goto remove_sdev_ufs_device;
6047         }
6048         scsi_device_put(sdev_boot);
6049
6050         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6051                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6052         if (IS_ERR(sdev_rpmb)) {
6053                 ret = PTR_ERR(sdev_rpmb);
6054                 goto remove_sdev_boot;
6055         }
6056         scsi_device_put(sdev_rpmb);
6057         goto out;
6058
6059 remove_sdev_boot:
6060         scsi_remove_device(sdev_boot);
6061 remove_sdev_ufs_device:
6062         scsi_remove_device(hba->sdev_ufs_device);
6063 out:
6064         return ret;
6065 }
6066
6067 static int ufs_get_device_desc(struct ufs_hba *hba,
6068                                struct ufs_dev_desc *dev_desc)
6069 {
6070         int err;
6071         u8 model_index;
6072         u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
6073         u8 desc_buf[hba->desc_size.dev_desc];
6074
6075         err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6076         if (err) {
6077                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6078                         __func__, err);
6079                 goto out;
6080         }
6081
6082         /*
6083          * The vendor id (wManufacturerID) is stored in the device
6084          * descriptor in big endian format
6085          */
6086         dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6087                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6088
6089         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6090
6091         err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
6092                                 QUERY_DESC_MAX_SIZE, ASCII_STD);
6093         if (err) {
6094                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6095                         __func__, err);
6096                 goto out;
6097         }
6098
6099         str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6100         strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
6101                 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
6102                       MAX_MODEL_LEN));
6103
6104         /* Null terminate the model string */
6105         dev_desc->model[MAX_MODEL_LEN] = '\0';
6106
6107 out:
6108         return err;
6109 }
6110
6111 static void ufs_fixup_device_setup(struct ufs_hba *hba,
6112                                    struct ufs_dev_desc *dev_desc)
6113 {
6114         struct ufs_dev_fix *f;
6115
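        /*
         * Walk the quirk table and apply every entry whose vendor id and
         * model prefix match this device; UFS_ANY_VENDOR and UFS_ANY_MODEL
         * act as wildcards.
         */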
6116         for (f = ufs_fixups; f->quirk; f++) {
6117                 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6118                      f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6119                     (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6120                      !strcmp(f->card.model, UFS_ANY_MODEL)))
6121                         hba->dev_quirks |= f->quirk;
6122         }
6123 }
6124
6125 /**
6126  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6127  * @hba: per-adapter instance
6128  *
6129  * PA_TActivate parameter can be tuned manually if UniPro version is less than
6130  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6131  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6132  * the hibern8 exit latency.
6133  *
6134  * Returns zero on success, non-zero error value on failure.
6135  */
6136 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6137 {
6138         int ret = 0;
6139         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6140
6141         ret = ufshcd_dme_peer_get(hba,
6142                                   UIC_ARG_MIB_SEL(
6143                                         RX_MIN_ACTIVATETIME_CAPABILITY,
6144                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6145                                   &peer_rx_min_activatetime);
6146         if (ret)
6147                 goto out;
6148
6149         /* make sure proper unit conversion is applied */
6150         tuned_pa_tactivate =
6151                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6152                  / PA_TACTIVATE_TIME_UNIT_US);
6153         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6154                              tuned_pa_tactivate);
6155
6156 out:
6157         return ret;
6158 }
6159
6160 /**
6161  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6162  * @hba: per-adapter instance
6163  *
6164  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6165  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6166  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6167  * This optimal value can help reduce the hibern8 exit latency.
6168  *
6169  * Returns zero on success, non-zero error value on failure.
6170  */
6171 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6172 {
6173         int ret = 0;
6174         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6175         u32 max_hibern8_time, tuned_pa_hibern8time;
6176
6177         ret = ufshcd_dme_get(hba,
6178                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6179                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6180                                   &local_tx_hibern8_time_cap);
6181         if (ret)
6182                 goto out;
6183
6184         ret = ufshcd_dme_peer_get(hba,
6185                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6186                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6187                                   &peer_rx_hibern8_time_cap);
6188         if (ret)
6189                 goto out;
6190
6191         max_hibern8_time = max(local_tx_hibern8_time_cap,
6192                                peer_rx_hibern8_time_cap);
6193         /* make sure proper unit conversion is applied */
6194         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6195                                 / PA_HIBERN8_TIME_UNIT_US);
6196         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6197                              tuned_pa_hibern8time);
6198 out:
6199         return ret;
6200 }
6201
6202 /**
6203  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6204  * less than device PA_TACTIVATE time.
6205  * @hba: per-adapter instance
6206  *
6207  * Some UFS devices require host PA_TACTIVATE to be lower than device
6208  * PA_TACTIVATE, so the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk is enabled
6209  * for such devices.
6210  *
6211  * Returns zero on success, non-zero error value on failure.
6212  */
6213 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6214 {
6215         int ret = 0;
6216         u32 granularity, peer_granularity;
6217         u32 pa_tactivate, peer_pa_tactivate;
6218         u32 pa_tactivate_us, peer_pa_tactivate_us;
6219         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6220
6221         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6222                                   &granularity);
6223         if (ret)
6224                 goto out;
6225
6226         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6227                                   &peer_granularity);
6228         if (ret)
6229                 goto out;
6230
6231         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6232             (granularity > PA_GRANULARITY_MAX_VAL)) {
6233                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6234                         __func__, granularity);
6235                 return -EINVAL;
6236         }
6237
6238         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6239             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6240                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6241                         __func__, peer_granularity);
6242                 return -EINVAL;
6243         }
6244
6245         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6246         if (ret)
6247                 goto out;
6248
6249         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6250                                   &peer_pa_tactivate);
6251         if (ret)
6252                 goto out;
6253
6254         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6255         peer_pa_tactivate_us = peer_pa_tactivate *
6256                              gran_to_us_table[peer_granularity - 1];
6257
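        /*
         * Example: with host granularity 1 (1 us units) and device
         * granularity 4 (16 us units), PA_TACTIVATE = 64 on the host means
         * 64 us while peer PA_TACTIVATE = 3 means only 48 us; the peer is
         * then raised to 64 / 16 + 1 = 5 units (80 us) so that the device
         * value always exceeds the host value.
         */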
6258         if (pa_tactivate_us > peer_pa_tactivate_us) {
6259                 u32 new_peer_pa_tactivate;
6260
6261                 new_peer_pa_tactivate = pa_tactivate_us /
6262                                       gran_to_us_table[peer_granularity - 1];
6263                 new_peer_pa_tactivate++;
6264                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6265                                           new_peer_pa_tactivate);
6266         }
6267
6268 out:
6269         return ret;
6270 }
6271
6272 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6273 {
6274         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6275                 ufshcd_tune_pa_tactivate(hba);
6276                 ufshcd_tune_pa_hibern8time(hba);
6277         }
6278
6279         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6280                 /* set 1ms timeout for PA_TACTIVATE */
6281                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6282
6283         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6284                 ufshcd_quirk_tune_host_pa_tactivate(hba);
6285
6286         ufshcd_vops_apply_dev_quirks(hba);
6287 }
6288
6289 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6290 {
6291         int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6292
6293         hba->ufs_stats.hibern8_exit_cnt = 0;
6294         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6295
6296         memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6297         memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6298         memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6299         memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6300         memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6301
6302         hba->req_abort_count = 0;
6303 }
6304
6305 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6306 {
6307         int err;
6308
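        /*
         * Read the actual length of each descriptor from the device; if a
         * query fails, fall back to the default size defined by the spec.
         */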
6309         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6310                 &hba->desc_size.dev_desc);
6311         if (err)
6312                 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6313
6314         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6315                 &hba->desc_size.pwr_desc);
6316         if (err)
6317                 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6318
6319         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6320                 &hba->desc_size.interc_desc);
6321         if (err)
6322                 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6323
6324         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6325                 &hba->desc_size.conf_desc);
6326         if (err)
6327                 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6328
6329         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6330                 &hba->desc_size.unit_desc);
6331         if (err)
6332                 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6333
6334         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6335                 &hba->desc_size.geom_desc);
6336         if (err)
6337                 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6338 }
6339
6340 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6341 {
6342         hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6343         hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6344         hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6345         hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6346         hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6347         hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6348 }
6349
6350 /**
6351  * ufshcd_probe_hba - probe hba to detect device and initialize
6352  * @hba: per-adapter instance
6353  *
6354  * Execute link-startup and verify device initialization
6355  */
6356 static int ufshcd_probe_hba(struct ufs_hba *hba)
6357 {
6358         struct ufs_dev_desc card = {0};
6359         int ret;
6360         ktime_t start = ktime_get();
6361
6362         ret = ufshcd_link_startup(hba);
6363         if (ret)
6364                 goto out;
6365
6366         /* set the default level for urgent bkops */
6367         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6368         hba->is_urgent_bkops_lvl_checked = false;
6369
6370         /* Debug counters initialization */
6371         ufshcd_clear_dbg_ufs_stats(hba);
6372
6373         /* UniPro link is active now */
6374         ufshcd_set_link_active(hba);
6375
6376         ret = ufshcd_verify_dev_init(hba);
6377         if (ret)
6378                 goto out;
6379
6380         ret = ufshcd_complete_dev_init(hba);
6381         if (ret)
6382                 goto out;
6383
6384         /* Init check for device descriptor sizes */
6385         ufshcd_init_desc_sizes(hba);
6386
6387         ret = ufs_get_device_desc(hba, &card);
6388         if (ret) {
6389                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6390                         __func__, ret);
6391                 goto out;
6392         }
6393
6394         ufs_fixup_device_setup(hba, &card);
6395         ufshcd_tune_unipro_params(hba);
6396
6397         ret = ufshcd_set_vccq_rail_unused(hba,
6398                 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6399         if (ret)
6400                 goto out;
6401
6402         /* UFS device is also active now */
6403         ufshcd_set_ufs_dev_active(hba);
6404         ufshcd_force_reset_auto_bkops(hba);
6405         hba->wlun_dev_clr_ua = true;
6406
6407         if (ufshcd_get_max_pwr_mode(hba)) {
6408                 dev_err(hba->dev,
6409                         "%s: Failed getting max supported power mode\n",
6410                         __func__);
6411         } else {
6412                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6413                 if (ret) {
6414                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6415                                         __func__, ret);
6416                         goto out;
6417                 }
6418         }
6419
6420         /* set the state as operational after switching to desired gear */
6421         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6422
6423         /*
6424          * If we are in error handling context or in power management callbacks
6425          * context, no need to scan the host
6426          */
6427         if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6428                 bool flag;
6429
6430                 /* clear any previous UFS device information */
6431                 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6432                 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6433                                 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6434                         hba->dev_info.f_power_on_wp_en = flag;
6435
6436                 if (!hba->is_init_prefetch)
6437                         ufshcd_init_icc_levels(hba);
6438
6439                 /* Add required well known logical units to scsi mid layer */
6440                 ret = ufshcd_scsi_add_wlus(hba);
6441                 if (ret)
6442                         goto out;
6443
6444                 /* Initialize devfreq after UFS device is detected */
6445                 if (ufshcd_is_clkscaling_supported(hba)) {
6446                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
6447                                 &hba->pwr_info,
6448                                 sizeof(struct ufs_pa_layer_attr));
6449                         hba->clk_scaling.saved_pwr_info.is_valid = true;
6450                         if (!hba->devfreq) {
6451                                 hba->devfreq = devm_devfreq_add_device(hba->dev,
6452                                                         &ufs_devfreq_profile,
6453                                                         "simple_ondemand",
6454                                                         NULL);
6455                                 if (IS_ERR(hba->devfreq)) {
6456                                         ret = PTR_ERR(hba->devfreq);
6457                                         dev_err(hba->dev, "Unable to register with devfreq %d\n",
6458                                                         ret);
6459                                         goto out;
6460                                 }
6461                         }
6462                         hba->clk_scaling.is_allowed = true;
6463                 }
6464
6465                 scsi_scan_host(hba->host);
6466                 pm_runtime_put_sync(hba->dev);
6467         }
6468
6469         if (!hba->is_init_prefetch)
6470                 hba->is_init_prefetch = true;
6471
6472 out:
6473         /*
6474          * If we failed to initialize the device or the device is not
6475          * present, turn off the power/clocks etc.
6476          */
6477         if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6478                 pm_runtime_put_sync(hba->dev);
6479                 ufshcd_hba_exit(hba);
6480         }
6481
6482         trace_ufshcd_init(dev_name(hba->dev), ret,
6483                 ktime_to_us(ktime_sub(ktime_get(), start)),
6484                 hba->curr_dev_pwr_mode, hba->uic_link_state);
6485         return ret;
6486 }
6487
6488 /**
6489  * ufshcd_async_scan - asynchronous execution for probing hba
6490  * @data: data pointer to pass to this function
6491  * @cookie: cookie data
6492  */
6493 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6494 {
6495         struct ufs_hba *hba = (struct ufs_hba *)data;
6496
6497         ufshcd_probe_hba(hba);
6498 }
6499
6500 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6501 {
6502         unsigned long flags;
6503         struct Scsi_Host *host;
6504         struct ufs_hba *hba;
6505         int index;
6506         bool found = false;
6507
6508         if (!scmd || !scmd->device || !scmd->device->host)
6509                 return BLK_EH_NOT_HANDLED;
6510
6511         host = scmd->device->host;
6512         hba = shost_priv(host);
6513         if (!hba)
6514                 return BLK_EH_NOT_HANDLED;
6515
6516         spin_lock_irqsave(host->host_lock, flags);
6517
6518         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6519                 if (hba->lrb[index].cmd == scmd) {
6520                         found = true;
6521                         break;
6522                 }
6523         }
6524
6525         spin_unlock_irqrestore(host->host_lock, flags);
6526
6527         /*
6528          * Bypass SCSI error handling and reset the block layer timer if this
6529          * SCSI command was not actually dispatched to UFS driver, otherwise
6530          * let SCSI layer handle the error as usual.
6531          */
6532         return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
6533 }
6534
6535 static struct scsi_host_template ufshcd_driver_template = {
6536         .module                 = THIS_MODULE,
6537         .name                   = UFSHCD,
6538         .proc_name              = UFSHCD,
6539         .queuecommand           = ufshcd_queuecommand,
6540         .slave_alloc            = ufshcd_slave_alloc,
6541         .slave_configure        = ufshcd_slave_configure,
6542         .slave_destroy          = ufshcd_slave_destroy,
6543         .change_queue_depth     = ufshcd_change_queue_depth,
6544         .eh_abort_handler       = ufshcd_abort,
6545         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6546         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
6547         .eh_timed_out           = ufshcd_eh_timed_out,
6548         .this_id                = -1,
6549         .sg_tablesize           = SG_ALL,
6550         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
6551         .can_queue              = UFSHCD_CAN_QUEUE,
6552         .max_host_blocked       = 1,
6553         .track_queue_depth      = 1,
6554 };
6555
6556 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6557                                    int ua)
6558 {
6559         int ret;
6560
6561         if (!vreg)
6562                 return 0;
6563
6564         /*
6565          * The "set_load" operation is only required for regulators that
6566          * have an explicitly configured current limit. Otherwise a zero
6567          * max_uA may cause unexpected behavior when the regulator is
6568          * enabled or set to high power mode.
6569          */
6570         if (!vreg->max_uA)
6571                 return 0;
6572
6573         ret = regulator_set_load(vreg->reg, ua);
6574         if (ret < 0) {
6575                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6576                                 __func__, vreg->name, ua, ret);
6577         }
6578
6579         return ret;
6580 }
6581
6582 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6583                                          struct ufs_vreg *vreg)
6584 {
6585         if (!vreg)
6586                 return 0;
6587         else if (vreg->unused)
6588                 return 0;
6589         else
6590                 return ufshcd_config_vreg_load(hba->dev, vreg,
6591                                                UFS_VREG_LPM_LOAD_UA);
6592 }
6593
6594 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6595                                          struct ufs_vreg *vreg)
6596 {
6597         if (!vreg)
6598                 return 0;
6599         else if (vreg->unused)
6600                 return 0;
6601         else
6602                 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
6603 }
6604
6605 static int ufshcd_config_vreg(struct device *dev,
6606                 struct ufs_vreg *vreg, bool on)
6607 {
6608         int ret = 0;
6609         struct regulator *reg;
6610         const char *name;
6611         int min_uV, uA_load;
6612
6613         BUG_ON(!vreg);
6614
6615         reg = vreg->reg;
6616         name = vreg->name;
6617
6618         if (regulator_count_voltages(reg) > 0) {
6619                 if (vreg->min_uV && vreg->max_uV) {
6620                         min_uV = on ? vreg->min_uV : 0;
6621                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6622                         if (ret) {
6623                                 dev_err(dev,
6624                                         "%s: %s set voltage failed, err=%d\n",
6625                                         __func__, name, ret);
6626                                 goto out;
6627                         }
6628                 }
6629
6630                 uA_load = on ? vreg->max_uA : 0;
6631                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6632                 if (ret)
6633                         goto out;
6634         }
6635 out:
6636         return ret;
6637 }
6638
6639 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6640 {
6641         int ret = 0;
6642
6643         if (!vreg)
6644                 goto out;
6645         else if (vreg->enabled || vreg->unused)
6646                 goto out;
6647
6648         ret = ufshcd_config_vreg(dev, vreg, true);
6649         if (!ret)
6650                 ret = regulator_enable(vreg->reg);
6651
6652         if (!ret)
6653                 vreg->enabled = true;
6654         else
6655                 dev_err(dev, "%s: %s enable failed, err=%d\n",
6656                                 __func__, vreg->name, ret);
6657 out:
6658         return ret;
6659 }
6660
6661 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6662 {
6663         int ret = 0;
6664
6665         if (!vreg)
6666                 goto out;
6667         else if (!vreg->enabled || vreg->unused)
6668                 goto out;
6669
6670         ret = regulator_disable(vreg->reg);
6671
6672         if (!ret) {
6673                 /* ignore errors on applying disable config */
6674                 ufshcd_config_vreg(dev, vreg, false);
6675                 vreg->enabled = false;
6676         } else {
6677                 dev_err(dev, "%s: %s disable failed, err=%d\n",
6678                                 __func__, vreg->name, ret);
6679         }
6680 out:
6681         return ret;
6682 }
6683
6684 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6685 {
6686         int ret = 0;
6687         struct device *dev = hba->dev;
6688         struct ufs_vreg_info *info = &hba->vreg_info;
6689
6690         if (!info)
6691                 goto out;
6692
6693         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6694         if (ret)
6695                 goto out;
6696
6697         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6698         if (ret)
6699                 goto out;
6700
6701         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
6702         if (ret)
6703                 goto out;
6704
6705 out:
6706         if (ret) {
6707                 ufshcd_toggle_vreg(dev, info->vccq2, false);
6708                 ufshcd_toggle_vreg(dev, info->vccq, false);
6709                 ufshcd_toggle_vreg(dev, info->vcc, false);
6710         }
6711         return ret;
6712 }
6713
6714 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
6715 {
6716         struct ufs_vreg_info *info = &hba->vreg_info;
6717
6718         if (info)
6719                 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6720
6721         return 0;
6722 }
6723
6724 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
6725 {
6726         int ret = 0;
6727
6728         if (!vreg)
6729                 goto out;
6730
6731         vreg->reg = devm_regulator_get(dev, vreg->name);
6732         if (IS_ERR(vreg->reg)) {
6733                 ret = PTR_ERR(vreg->reg);
6734                 dev_err(dev, "%s: %s get failed, err=%d\n",
6735                                 __func__, vreg->name, ret);
6736         }
6737 out:
6738         return ret;
6739 }
6740
6741 static int ufshcd_init_vreg(struct ufs_hba *hba)
6742 {
6743         int ret = 0;
6744         struct device *dev = hba->dev;
6745         struct ufs_vreg_info *info = &hba->vreg_info;
6746
6747         if (!info)
6748                 goto out;
6749
6750         ret = ufshcd_get_vreg(dev, info->vcc);
6751         if (ret)
6752                 goto out;
6753
6754         ret = ufshcd_get_vreg(dev, info->vccq);
6755         if (ret)
6756                 goto out;
6757
6758         ret = ufshcd_get_vreg(dev, info->vccq2);
6759 out:
6760         return ret;
6761 }
6762
6763 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
6764 {
6765         struct ufs_vreg_info *info = &hba->vreg_info;
6766
6767         if (info)
6768                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6769
6770         return 0;
6771 }
6772
6773 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
6774 {
6775         int ret = 0;
6776         struct ufs_vreg_info *info = &hba->vreg_info;
6777
6778         if (!info)
6779                 goto out;
6780         else if (!info->vccq)
6781                 goto out;
6782
6783         if (unused) {
6784                 /* shut off the rail here */
6785                 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
6786                 /*
6787                  * Mark this rail as no longer used, so it doesn't get enabled
6788                  * later by mistake
6789                  */
6790                 if (!ret)
6791                         info->vccq->unused = true;
6792         } else {
6793                 /*
6794                  * rail should have been already enabled hence just make sure
6795                  * that unused flag is cleared.
6796                  */
6797                 info->vccq->unused = false;
6798         }
6799 out:
6800         return ret;
6801 }
6802
6803 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6804                                         bool skip_ref_clk)
6805 {
6806         int ret = 0;
6807         struct ufs_clk_info *clki;
6808         struct list_head *head = &hba->clk_list_head;
6809         unsigned long flags;
6810         ktime_t start = ktime_get();
6811         bool clk_state_changed = false;
6812
6813         if (list_empty(head))
6814                 goto out;
6815
6816         /*
6817          * vendor specific setup_clocks ops may depend on clocks managed by
6818          * this standard driver hence call the vendor specific setup_clocks
6819          * before disabling the clocks managed here.
6820          */
6821         if (!on) {
6822                 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
6823                 if (ret)
6824                         return ret;
6825         }
6826
6827         list_for_each_entry(clki, head, list) {
6828                 if (!IS_ERR_OR_NULL(clki->clk)) {
6829                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
6830                                 continue;
6831
6832                         clk_state_changed = on ^ clki->enabled;
6833                         if (on && !clki->enabled) {
6834                                 ret = clk_prepare_enable(clki->clk);
6835                                 if (ret) {
6836                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
6837                                                 __func__, clki->name, ret);
6838                                         goto out;
6839                                 }
6840                         } else if (!on && clki->enabled) {
6841                                 clk_disable_unprepare(clki->clk);
6842                         }
6843                         clki->enabled = on;
6844                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
6845                                         clki->name, on ? "en" : "dis");
6846                 }
6847         }
6848
6849         /*
6850          * vendor specific setup_clocks ops may depend on clocks managed by
6851          * this standard driver hence call the vendor specific setup_clocks
6852          * after enabling the clocks managed here.
6853          */
6854         if (on) {
6855                 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
6856                 if (ret)
6857                         return ret;
6858         }
6859
6860 out:
6861         if (ret) {
6862                 list_for_each_entry(clki, head, list) {
6863                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
6864                                 clk_disable_unprepare(clki->clk);
6865                 }
6866         } else if (!ret && on) {
6867                 spin_lock_irqsave(hba->host->host_lock, flags);
6868                 hba->clk_gating.state = CLKS_ON;
6869                 trace_ufshcd_clk_gating(dev_name(hba->dev),
6870                                         hba->clk_gating.state);
6871                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6872         }
6873
6874         if (clk_state_changed)
6875                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
6876                         (on ? "on" : "off"),
6877                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
6878         return ret;
6879 }
6880
6881 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
6882 {
6883         return  __ufshcd_setup_clocks(hba, on, false);
6884 }
6885
6886 static int ufshcd_init_clocks(struct ufs_hba *hba)
6887 {
6888         int ret = 0;
6889         struct ufs_clk_info *clki;
6890         struct device *dev = hba->dev;
6891         struct list_head *head = &hba->clk_list_head;
6892
6893         if (list_empty(head))
6894                 goto out;
6895
6896         list_for_each_entry(clki, head, list) {
6897                 if (!clki->name)
6898                         continue;
6899
6900                 clki->clk = devm_clk_get(dev, clki->name);
6901                 if (IS_ERR(clki->clk)) {
6902                         ret = PTR_ERR(clki->clk);
6903                         dev_err(dev, "%s: %s clk get failed, %d\n",
6904                                         __func__, clki->name, ret);
6905                         goto out;
6906                 }
6907
6908                 if (clki->max_freq) {
6909                         ret = clk_set_rate(clki->clk, clki->max_freq);
6910                         if (ret) {
6911                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6912                                         __func__, clki->name,
6913                                         clki->max_freq, ret);
6914                                 goto out;
6915                         }
6916                         clki->curr_freq = clki->max_freq;
6917                 }
6918                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
6919                                 clki->name, clk_get_rate(clki->clk));
6920         }
6921 out:
6922         return ret;
6923 }
6924
6925 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
6926 {
6927         int err = 0;
6928
6929         if (!hba->vops)
6930                 goto out;
6931
6932         err = ufshcd_vops_init(hba);
6933         if (err)
6934                 goto out;
6935
6936         err = ufshcd_vops_setup_regulators(hba, true);
6937         if (err)
6938                 goto out_exit;
6939
6940         goto out;
6941
6942 out_exit:
6943         ufshcd_vops_exit(hba);
6944 out:
6945         if (err)
6946                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
6947                         __func__, ufshcd_get_var_name(hba), err);
6948         return err;
6949 }
6950
6951 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
6952 {
6953         if (!hba->vops)
6954                 return;
6955
6956         ufshcd_vops_setup_regulators(hba, false);
6957
6958         ufshcd_vops_exit(hba);
6959 }
6960
6961 static int ufshcd_hba_init(struct ufs_hba *hba)
6962 {
6963         int err;
6964
6965         /*
6966          * Handle host controller power separately from the UFS device power
6967          * rails, as that makes it easier to control host controller power
6968          * collapse, which is different from UFS device power collapse.
6969          * Also, enable the host controller power before we go ahead with the
6970          * rest of the initialization here.
6971          */
6972         err = ufshcd_init_hba_vreg(hba);
6973         if (err)
6974                 goto out;
6975
6976         err = ufshcd_setup_hba_vreg(hba, true);
6977         if (err)
6978                 goto out;
6979
6980         err = ufshcd_init_clocks(hba);
6981         if (err)
6982                 goto out_disable_hba_vreg;
6983
6984         err = ufshcd_setup_clocks(hba, true);
6985         if (err)
6986                 goto out_disable_hba_vreg;
6987
6988         err = ufshcd_init_vreg(hba);
6989         if (err)
6990                 goto out_disable_clks;
6991
6992         err = ufshcd_setup_vreg(hba, true);
6993         if (err)
6994                 goto out_disable_clks;
6995
6996         err = ufshcd_variant_hba_init(hba);
6997         if (err)
6998                 goto out_disable_vreg;
6999
7000         hba->is_powered = true;
7001         goto out;
7002
7003 out_disable_vreg:
7004         ufshcd_setup_vreg(hba, false);
7005 out_disable_clks:
7006         ufshcd_setup_clocks(hba, false);
7007 out_disable_hba_vreg:
7008         ufshcd_setup_hba_vreg(hba, false);
7009 out:
7010         return err;
7011 }
7012
7013 static void ufshcd_hba_exit(struct ufs_hba *hba)
7014 {
7015         if (hba->is_powered) {
7016                 ufshcd_variant_hba_exit(hba);
7017                 ufshcd_setup_vreg(hba, false);
7018                 ufshcd_suspend_clkscaling(hba);
7019                 if (ufshcd_is_clkscaling_supported(hba)) {
7020                         if (hba->devfreq)
7021                                 ufshcd_suspend_clkscaling(hba);
7022                         destroy_workqueue(hba->clk_scaling.workq);
7023                 }
7024                 ufshcd_setup_clocks(hba, false);
7025                 ufshcd_setup_hba_vreg(hba, false);
7026                 hba->is_powered = false;
7027         }
7028 }
7029
7030 static int
7031 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7032 {
7033         unsigned char cmd[6] = {REQUEST_SENSE,
7034                                 0,
7035                                 0,
7036                                 0,
7037                                 UFSHCD_REQ_SENSE_SIZE,
7038                                 0};
7039         char *buffer;
7040         int ret;
7041
7042         buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7043         if (!buffer) {
7044                 ret = -ENOMEM;
7045                 goto out;
7046         }
7047
7048         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7049                         UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7050                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7051         if (ret)
7052                 pr_err("%s: failed with err %d\n", __func__, ret);
7053
7054         kfree(buffer);
7055 out:
7056         return ret;
7057 }
7058
7059 /**
7060  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7061  *                           power mode
7062  * @hba: per adapter instance
7063  * @pwr_mode: device power mode to set
7064  *
7065  * Returns 0 if requested power mode is set successfully
7066  * Returns non-zero if failed to set the requested power mode
7067  */
7068 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7069                                      enum ufs_dev_pwr_mode pwr_mode)
7070 {
7071         unsigned char cmd[6] = { START_STOP };
7072         struct scsi_sense_hdr sshdr;
7073         struct scsi_device *sdp;
7074         unsigned long flags;
7075         int ret;
7076
7077         spin_lock_irqsave(hba->host->host_lock, flags);
7078         sdp = hba->sdev_ufs_device;
7079         if (sdp) {
7080                 ret = scsi_device_get(sdp);
7081                 if (!ret && !scsi_device_online(sdp)) {
7082                         ret = -ENODEV;
7083                         scsi_device_put(sdp);
7084                 }
7085         } else {
7086                 ret = -ENODEV;
7087         }
7088         spin_unlock_irqrestore(hba->host->host_lock, flags);
7089
7090         if (ret)
7091                 return ret;
7092
7093         /*
7094          * If scsi commands fail, the scsi mid-layer schedules scsi error-
7095          * handling, which would wait for host to be resumed. Since we know
7096          * we are functional while we are here, skip host resume in error
7097          * handling context.
7098          */
7099         hba->host->eh_noresume = 1;
7100         if (hba->wlun_dev_clr_ua) {
7101                 ret = ufshcd_send_request_sense(hba, sdp);
7102                 if (ret)
7103                         goto out;
7104                 /* Unit attention condition is cleared now */
7105                 hba->wlun_dev_clr_ua = false;
7106         }
7107
7108         cmd[4] = pwr_mode << 4;
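        /*
         * The requested power mode is passed in the POWER CONDITION field,
         * which lives in the upper nibble of byte 4 of the START STOP UNIT
         * CDB, hence the shift by 4 below.
         */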
7109
7110         /*
7111          * This function is generally called from the power management
7112          * callbacks, hence set the RQF_PM flag so that it doesn't resume the
7113          * already suspended children.
7114          */
7115         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7116                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7117         if (ret) {
7118                 sdev_printk(KERN_WARNING, sdp,
7119                             "START_STOP failed for power mode: %d, result %x\n",
7120                             pwr_mode, ret);
7121                 if (driver_byte(ret) & DRIVER_SENSE)
7122                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
7123         }
7124
7125         if (!ret)
7126                 hba->curr_dev_pwr_mode = pwr_mode;
7127 out:
7128         scsi_device_put(sdp);
7129         hba->host->eh_noresume = 0;
7130         return ret;
7131 }
7132
7133 static int ufshcd_link_state_transition(struct ufs_hba *hba,
7134                                         enum uic_link_state req_link_state,
7135                                         int check_for_bkops)
7136 {
7137         int ret = 0;
7138
7139         if (req_link_state == hba->uic_link_state)
7140                 return 0;
7141
7142         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7143                 ret = ufshcd_uic_hibern8_enter(hba);
7144                 if (!ret)
7145                         ufshcd_set_link_hibern8(hba);
7146                 else
7147                         goto out;
7148         }
7149         /*
7150          * If autobkops is enabled, link can't be turned off because
7151          * turning off the link would also turn off the device.
7152          */
7153         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7154                    (!check_for_bkops || (check_for_bkops &&
7155                     !hba->auto_bkops_enabled))) {
7156                 /*
7157                  * Let's make sure that the link is in low power mode; we are
7158                  * currently doing this by putting the link in Hibern8. Another way
7159                  * to put the link in low power mode is to send a DME end point
7160                  * reset to the device and then send the DME reset command to the
7161                  * local UniPro. But putting the link in Hibern8 is much faster.
7162                  */
7163                 ret = ufshcd_uic_hibern8_enter(hba);
7164                 if (ret)
7165                         goto out;
7166                 /*
7167                  * Change controller state to "reset state" which
7168                  * should also put the link in off/reset state
7169                  */
7170                 ufshcd_hba_stop(hba, true);
7171                 /*
7172                  * TODO: Check if we need any delay to make sure that
7173                  * controller is reset
7174                  */
7175                 ufshcd_set_link_off(hba);
7176         }
7177
7178 out:
7179         return ret;
7180 }
7181
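     /* Put the UFS rails into low power mode based on the device and link state */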
7182 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7183 {
7184         /*
7185          * It seems some UFS devices may keep drawing more than sleep current
7186          * (at least for 500us) from UFS rails (especially from the VCCQ rail).
7187          * To avoid this situation, add a 2ms delay before putting these UFS
7188          * rails in LPM mode.
7189          */
7190         if (!ufshcd_is_link_active(hba) &&
7191             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7192                 usleep_range(2000, 2100);
7193
7194         /*
7195          * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7196          * save some power.
7197          *
7198          * If the UFS device and link are in OFF state, all power supplies
7199          * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is
7200          * not required. If the UFS link is inactive (Hibern8 or OFF state)
7201          * and the device is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7202          *
7203          * Ignore the error returned by ufshcd_toggle_vreg() as the device is
7204          * anyway in a low power state, which would save some power.
7205          */
7206         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7207             !hba->dev_info.is_lu_power_on_wp) {
7208                 ufshcd_setup_vreg(hba, false);
7209         } else if (!ufshcd_is_ufs_dev_active(hba)) {
7210                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7211                 if (!ufshcd_is_link_active(hba)) {
7212                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7213                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7214                 }
7215         }
7216 }
7217
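     /* Bring the UFS rails back to high power mode, undoing ufshcd_vreg_set_lpm() */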
7218 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7219 {
7220         int ret = 0;
7221
7222         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7223             !hba->dev_info.is_lu_power_on_wp) {
7224                 ret = ufshcd_setup_vreg(hba, true);
7225         } else if (!ufshcd_is_ufs_dev_active(hba)) {
7226                 if (!ret && !ufshcd_is_link_active(hba)) {
7227                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7228                         if (ret)
7229                                 goto vcc_disable;
7230                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7231                         if (ret)
7232                                 goto vccq_lpm;
7233                 }
7234                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7235         }
7236         goto out;
7237
7238 vccq_lpm:
7239         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7240 vcc_disable:
7241         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7242 out:
7243         return ret;
7244 }
7245
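     /* The host controller supply is only toggled while the UniPro link is off */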
7246 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7247 {
7248         if (ufshcd_is_link_off(hba))
7249                 ufshcd_setup_hba_vreg(hba, false);
7250 }
7251
7252 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7253 {
7254         if (ufshcd_is_link_off(hba))
7255                 ufshcd_setup_hba_vreg(hba, true);
7256 }
7257
7258 /**
7259  * ufshcd_suspend - helper function for suspend operations
7260  * @hba: per adapter instance
7261  * @pm_op: desired low power operation type
7262  *
7263  * This function will try to put the UFS device and link into low power
7264  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7265  * (System PM level).
7266  *
7267  * If this function is called during shutdown, it will make sure that
7268  * both the UFS device and the UFS link are powered off.
7269  *
7270  * NOTE: UFS device & link must be active before we enter this function.
7271  *
7272  * Returns 0 for success and non-zero for failure
7273  */
7274 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7275 {
7276         int ret = 0;
7277         enum ufs_pm_level pm_lvl;
7278         enum ufs_dev_pwr_mode req_dev_pwr_mode;
7279         enum uic_link_state req_link_state;
7280
7281         hba->pm_op_in_progress = 1;
7282         if (!ufshcd_is_shutdown_pm(pm_op)) {
7283                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7284                          hba->rpm_lvl : hba->spm_lvl;
7285                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7286                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7287         } else {
7288                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7289                 req_link_state = UIC_LINK_OFF_STATE;
7290         }
7291
7292         /*
7293          * If we can't transition into any of the low power modes
7294          * just gate the clocks.
7295          */
7296         ufshcd_hold(hba, false);
7297         hba->clk_gating.is_suspended = true;
7298
7299         if (hba->clk_scaling.is_allowed) {
7300                 cancel_work_sync(&hba->clk_scaling.suspend_work);
7301                 cancel_work_sync(&hba->clk_scaling.resume_work);
7302                 ufshcd_suspend_clkscaling(hba);
7303         }
7304
7305         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7306                         req_link_state == UIC_LINK_ACTIVE_STATE) {
7307                 goto disable_clks;
7308         }
7309
7310         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7311             (req_link_state == hba->uic_link_state))
7312                 goto enable_gating;
7313
7314         /* UFS device & link must be active before we enter this function */
7315         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7316                 ret = -EINVAL;
7317                 goto enable_gating;
7318         }
7319
7320         if (ufshcd_is_runtime_pm(pm_op)) {
7321                 if (ufshcd_can_autobkops_during_suspend(hba)) {
7322                         /*
7323                          * The device is idle with no requests in the queue,
7324                          * allow background operations if bkops status shows
7325                          * that performance might be impacted.
7326                          */
7327                         ret = ufshcd_urgent_bkops(hba);
7328                         if (ret)
7329                                 goto enable_gating;
7330                 } else {
7331                         /* make sure that auto bkops is disabled */
7332                         ufshcd_disable_auto_bkops(hba);
7333                 }
7334         }
7335
7336         if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7337              ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7338                !ufshcd_is_runtime_pm(pm_op))) {
7339                 /* ensure that bkops is disabled */
7340                 ufshcd_disable_auto_bkops(hba);
7341                 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7342                 if (ret)
7343                         goto enable_gating;
7344         }
7345
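             /* Put the UniPro link into the requested low power state */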
7346         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7347         if (ret)
7348                 goto set_dev_active;
7349
7350         ufshcd_vreg_set_lpm(hba);
7351
7352 disable_clks:
7353         /*
7354          * Call vendor specific suspend callback. As these callbacks may access
7355          * vendor specific host controller register space, call them before the
7356          * host clocks are turned off.
7357          */
7358         ret = ufshcd_vops_suspend(hba, pm_op);
7359         if (ret)
7360                 goto set_link_active;
7361
7362         if (!ufshcd_is_link_active(hba))
7363                 ufshcd_setup_clocks(hba, false);
7364         else
7365                 /* If link is active, device ref_clk can't be switched off */
7366                 __ufshcd_setup_clocks(hba, false, true);
7367
7368         hba->clk_gating.state = CLKS_OFF;
7369         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7370         /*
7371          * Disable the host irq as there won't be any host controller
7372          * transactions expected till resume.
7373          */
7374         ufshcd_disable_irq(hba);
7375         /* Put the host controller in low power mode if possible */
7376         ufshcd_hba_vreg_set_lpm(hba);
7377         goto out;
7378
7379 set_link_active:
7380         if (hba->clk_scaling.is_allowed)
7381                 ufshcd_resume_clkscaling(hba);
7382         ufshcd_vreg_set_hpm(hba);
7383         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7384                 ufshcd_set_link_active(hba);
7385         else if (ufshcd_is_link_off(hba))
7386                 ufshcd_host_reset_and_restore(hba);
7387 set_dev_active:
7388         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7389                 ufshcd_disable_auto_bkops(hba);
7390 enable_gating:
7391         if (hba->clk_scaling.is_allowed)
7392                 ufshcd_resume_clkscaling(hba);
7393         hba->clk_gating.is_suspended = false;
7394         ufshcd_release(hba);
7395 out:
7396         hba->pm_op_in_progress = 0;
7397         return ret;
7398 }
7399
7400 /**
7401  * ufshcd_resume - helper function for resume operations
7402  * @hba: per adapter instance
7403  * @pm_op: runtime PM or system PM
7404  *
7405  * This function basically brings the UFS device, UniPro link and controller
7406  * to active state.
7407  *
7408  * Returns 0 for success and non-zero for failure
7409  */
7410 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7411 {
7412         int ret;
7413         enum uic_link_state old_link_state;
7414
7415         hba->pm_op_in_progress = 1;
7416         old_link_state = hba->uic_link_state;
7417
7418         ufshcd_hba_vreg_set_hpm(hba);
7419         /* Make sure clocks are enabled before accessing controller */
7420         ret = ufshcd_setup_clocks(hba, true);
7421         if (ret)
7422                 goto out;
7423
7424         /* enable the host irq as host controller would be active soon */
7425         ret = ufshcd_enable_irq(hba);
7426         if (ret)
7427                 goto disable_irq_and_vops_clks;
7428
7429         ret = ufshcd_vreg_set_hpm(hba);
7430         if (ret)
7431                 goto disable_irq_and_vops_clks;
7432
7433         /*
7434          * Call vendor specific resume callback. As these callbacks may access
7435          * vendor specific host controller register space, call them when the
7436          * host clocks are ON.
7437          */
7438         ret = ufshcd_vops_resume(hba, pm_op);
7439         if (ret)
7440                 goto disable_vreg;
7441
7442         if (ufshcd_is_link_hibern8(hba)) {
7443                 ret = ufshcd_uic_hibern8_exit(hba);
7444                 if (!ret)
7445                         ufshcd_set_link_active(hba);
7446                 else
7447                         goto vendor_suspend;
7448         } else if (ufshcd_is_link_off(hba)) {
7449                 ret = ufshcd_host_reset_and_restore(hba);
7450                 /*
7451                  * ufshcd_host_reset_and_restore() should have already
7452                  * set the link state as active
7453                  */
7454                 if (ret || !ufshcd_is_link_active(hba))
7455                         goto vendor_suspend;
7456         }
7457
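             /* Bring the UFS device back to active power mode if needed */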
7458         if (!ufshcd_is_ufs_dev_active(hba)) {
7459                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7460                 if (ret)
7461                         goto set_old_link_state;
7462         }
7463
7464         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7465                 ufshcd_enable_auto_bkops(hba);
7466         else
7467                 /*
7468                  * If BKOPs operations are urgently needed at this moment then
7469                  * keep auto-bkops enabled or else disable it.
7470                  */
7471                 ufshcd_urgent_bkops(hba);
7472
7473         hba->clk_gating.is_suspended = false;
7474
7475         if (hba->clk_scaling.is_allowed)
7476                 ufshcd_resume_clkscaling(hba);
7477
7478         /* Schedule clock gating in case of no access to UFS device yet */
7479         ufshcd_release(hba);
7480         goto out;
7481
7482 set_old_link_state:
7483         ufshcd_link_state_transition(hba, old_link_state, 0);
7484 vendor_suspend:
7485         ufshcd_vops_suspend(hba, pm_op);
7486 disable_vreg:
7487         ufshcd_vreg_set_lpm(hba);
7488 disable_irq_and_vops_clks:
7489         ufshcd_disable_irq(hba);
7490         if (hba->clk_scaling.is_allowed)
7491                 ufshcd_suspend_clkscaling(hba);
7492         ufshcd_setup_clocks(hba, false);
7493 out:
7494         hba->pm_op_in_progress = 0;
7495         return ret;
7496 }
7497
7498 /**
7499  * ufshcd_system_suspend - system suspend routine
7500  * @hba: per adapter instance
7501  *
7502  * Check the description of the ufshcd_suspend() function for more
7503  * details.
7504  *
7505  * Returns 0 for success and non-zero for failure
7506  */
7507 int ufshcd_system_suspend(struct ufs_hba *hba)
7508 {
7509         int ret = 0;
7510         ktime_t start = ktime_get();
7511
7512         if (!hba || !hba->is_powered)
7513                 return 0;
7514
7515         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7516              hba->curr_dev_pwr_mode) &&
7517             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7518              hba->uic_link_state))
7519                 goto out;
7520
7521         if (pm_runtime_suspended(hba->dev)) {
7522                 /*
7523                  * The UFS device and/or UFS link low power states during
7524                  * runtime suspend seem to differ from what is expected during
7525                  * system suspend. Hence runtime resume the device & link and
7526                  * let the system suspend low power states take effect.
7527                  * TODO: If resume takes too long, we might optimize it in the
7528                  * future by not resuming everything if possible.
7529                  */
7530                 ret = ufshcd_runtime_resume(hba);
7531                 if (ret)
7532                         goto out;
7533         }
7534
7535         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7536 out:
7537         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7538                 ktime_to_us(ktime_sub(ktime_get(), start)),
7539                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7540         if (!ret)
7541                 hba->is_sys_suspended = true;
7542         return ret;
7543 }
7544 EXPORT_SYMBOL(ufshcd_system_suspend);
7545
7546 /**
7547  * ufshcd_system_resume - system resume routine
7548  * @hba: per adapter instance
7549  *
7550  * Returns 0 for success and non-zero for failure
7551  */
7552
7553 int ufshcd_system_resume(struct ufs_hba *hba)
7554 {
7555         int ret = 0;
7556         ktime_t start = ktime_get();
7557
7558         if (!hba)
7559                 return -EINVAL;
7560
7561         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
7562                 /*
7563                  * Let the runtime resume take care of resuming
7564                  * if runtime suspended.
7565                  */
7566                 goto out;
7567         else
7568                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7569 out:
7570         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7571                 ktime_to_us(ktime_sub(ktime_get(), start)),
7572                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7573         if (!ret)
7574                 hba->is_sys_suspended = false;
7575         return ret;
7576 }
7577 EXPORT_SYMBOL(ufshcd_system_resume);
7578
7579 /**
7580  * ufshcd_runtime_suspend - runtime suspend routine
7581  * @hba: per adapter instance
7582  *
7583  * Check the description of ufshcd_suspend() function for more details.
7584  *
7585  * Returns 0 for success and non-zero for failure
7586  */
7587 int ufshcd_runtime_suspend(struct ufs_hba *hba)
7588 {
7589         int ret = 0;
7590         ktime_t start = ktime_get();
7591
7592         if (!hba)
7593                 return -EINVAL;
7594
7595         if (!hba->is_powered)
7596                 goto out;
7597         else
7598                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7599 out:
7600         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7601                 ktime_to_us(ktime_sub(ktime_get(), start)),
7602                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7603         return ret;
7604 }
7605 EXPORT_SYMBOL(ufshcd_runtime_suspend);
7606
7607 /**
7608  * ufshcd_runtime_resume - runtime resume routine
7609  * @hba: per adapter instance
7610  *
7611  * This function basically brings the UFS device, UniPro link and controller
7612  * to active state. Following operations are done in this function:
7613  *
7614  * 1. Turn on all the controller related clocks
7615  * 2. Bring the UniPro link out of Hibernate state
7616  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
7617  *    to active state.
7618  * 4. If auto-bkops is enabled on the device, disable it.
7619  *
7620  * So the following would be the possible power state after this function
7621  * returns successfully:
7622  *      S1: UFS device in Active state with VCC rail ON
7623  *          UniPro link in Active state
7624  *          All the UFS/UniPro controller clocks are ON
7625  *
7626  * Returns 0 for success and non-zero for failure
7627  */
7628 int ufshcd_runtime_resume(struct ufs_hba *hba)
7629 {
7630         int ret = 0;
7631         ktime_t start = ktime_get();
7632
7633         if (!hba)
7634                 return -EINVAL;
7635
7636         if (!hba->is_powered)
7637                 goto out;
7638         else
7639                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7640 out:
7641         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7642                 ktime_to_us(ktime_sub(ktime_get(), start)),
7643                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7644         return ret;
7645 }
7646 EXPORT_SYMBOL(ufshcd_runtime_resume);
7647
7648 int ufshcd_runtime_idle(struct ufs_hba *hba)
7649 {
7650         return 0;
7651 }
7652 EXPORT_SYMBOL(ufshcd_runtime_idle);
7653
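     /*
      * Common store handler for the rpm_lvl and spm_lvl sysfs attributes;
      * the rpm flag selects which of the two PM levels is updated.
      */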
7654 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
7655                                            struct device_attribute *attr,
7656                                            const char *buf, size_t count,
7657                                            bool rpm)
7658 {
7659         struct ufs_hba *hba = dev_get_drvdata(dev);
7660         unsigned long flags, value;
7661
7662         if (kstrtoul(buf, 0, &value))
7663                 return -EINVAL;
7664
7665         if (value >= UFS_PM_LVL_MAX)
7666                 return -EINVAL;
7667
7668         spin_lock_irqsave(hba->host->host_lock, flags);
7669         if (rpm)
7670                 hba->rpm_lvl = value;
7671         else
7672                 hba->spm_lvl = value;
7673         spin_unlock_irqrestore(hba->host->host_lock, flags);
7674         return count;
7675 }
7676
7677 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
7678                 struct device_attribute *attr, char *buf)
7679 {
7680         struct ufs_hba *hba = dev_get_drvdata(dev);
7681         int curr_len;
7682         u8 lvl;
7683
7684         curr_len = snprintf(buf, PAGE_SIZE,
7685                             "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
7686                             hba->rpm_lvl,
7687                             ufschd_ufs_dev_pwr_mode_to_string(
7688                                 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
7689                             ufschd_uic_link_state_to_string(
7690                                 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
7691
7692         curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7693                              "\nAll available Runtime PM levels info:\n");
7694         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7695                 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7696                                      "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
7697                                     lvl,
7698                                     ufschd_ufs_dev_pwr_mode_to_string(
7699                                         ufs_pm_lvl_states[lvl].dev_state),
7700                                     ufschd_uic_link_state_to_string(
7701                                         ufs_pm_lvl_states[lvl].link_state));
7702
7703         return curr_len;
7704 }
7705
7706 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
7707                 struct device_attribute *attr, const char *buf, size_t count)
7708 {
7709         return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
7710 }
7711
7712 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
7713 {
7714         hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
7715         hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
7716         sysfs_attr_init(&hba->rpm_lvl_attr.attr);
7717         hba->rpm_lvl_attr.attr.name = "rpm_lvl";
7718         hba->rpm_lvl_attr.attr.mode = 0644;
7719         if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
7720                 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
7721 }
7722
7723 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
7724                 struct device_attribute *attr, char *buf)
7725 {
7726         struct ufs_hba *hba = dev_get_drvdata(dev);
7727         int curr_len;
7728         u8 lvl;
7729
7730         curr_len = snprintf(buf, PAGE_SIZE,
7731                             "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
7732                             hba->spm_lvl,
7733                             ufschd_ufs_dev_pwr_mode_to_string(
7734                                 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
7735                             ufschd_uic_link_state_to_string(
7736                                 ufs_pm_lvl_states[hba->spm_lvl].link_state));
7737
7738         curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7739                              "\nAll available System PM levels info:\n");
7740         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7741                 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7742                                      "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
7743                                     lvl,
7744                                     ufschd_ufs_dev_pwr_mode_to_string(
7745                                         ufs_pm_lvl_states[lvl].dev_state),
7746                                     ufschd_uic_link_state_to_string(
7747                                         ufs_pm_lvl_states[lvl].link_state));
7748
7749         return curr_len;
7750 }
7751
7752 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
7753                 struct device_attribute *attr, const char *buf, size_t count)
7754 {
7755         return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
7756 }
7757
7758 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
7759 {
7760         hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
7761         hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
7762         sysfs_attr_init(&hba->spm_lvl_attr.attr);
7763         hba->spm_lvl_attr.attr.name = "spm_lvl";
7764         hba->spm_lvl_attr.attr.mode = 0644;
7765         if (device_create_file(hba->dev, &hba->spm_lvl_attr))
7766                 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
7767 }
7768
7769 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
7770 {
7771         ufshcd_add_rpm_lvl_sysfs_nodes(hba);
7772         ufshcd_add_spm_lvl_sysfs_nodes(hba);
7773 }
7774
7775 static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
7776 {
7777         device_remove_file(hba->dev, &hba->rpm_lvl_attr);
7778         device_remove_file(hba->dev, &hba->spm_lvl_attr);
7779 }
7780
7781 /**
7782  * ufshcd_shutdown - shutdown routine
7783  * @hba: per adapter instance
7784  *
7785  * This function would power off both UFS device and UFS link.
7786  *
7787  * Returns 0 always to allow force shutdown even in case of errors.
7788  */
7789 int ufshcd_shutdown(struct ufs_hba *hba)
7790 {
7791         int ret = 0;
7792
7793         if (!hba->is_powered)
7794                 goto out;
7795
7796         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7797                 goto out;
7798
7799         pm_runtime_get_sync(hba->dev);
7800
7801         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7802 out:
7803         if (ret)
7804                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7805         /* allow force shutdown even in case of errors */
7806         return 0;
7807 }
7808 EXPORT_SYMBOL(ufshcd_shutdown);
7809
7810 /**
7811  * ufshcd_remove - de-allocate the SCSI host and host memory space
7812  *              data structures
7813  * @hba: per adapter instance
7814  */
7815 void ufshcd_remove(struct ufs_hba *hba)
7816 {
7817         ufshcd_remove_sysfs_nodes(hba);
7818         scsi_remove_host(hba->host);
7819         /* disable interrupts */
7820         ufshcd_disable_intr(hba, hba->intr_mask);
7821         ufshcd_hba_stop(hba, true);
7822
7823         ufshcd_exit_clk_gating(hba);
7824         if (ufshcd_is_clkscaling_supported(hba))
7825                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
7826         ufshcd_hba_exit(hba);
7827 }
7828 EXPORT_SYMBOL_GPL(ufshcd_remove);
7829
7830 /**
7831  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
7832  * @hba: pointer to Host Bus Adapter (HBA)
7833  */
7834 void ufshcd_dealloc_host(struct ufs_hba *hba)
7835 {
7836         scsi_host_put(hba->host);
7837 }
7838 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
7839
7840 /**
7841  * ufshcd_set_dma_mask - Set dma mask based on the controller
7842  *                       addressing capability
7843  * @hba: per adapter instance
7844  *
7845  * Returns 0 for success, non-zero for failure
7846  */
7847 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7848 {
7849         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7850                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7851                         return 0;
7852         }
7853         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7854 }
7855
7856 /**
7857  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
7858  * @dev: pointer to device handle
7859  * @hba_handle: driver private handle
7860  * Returns 0 on success, non-zero value on failure
7861  */
7862 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7863 {
7864         struct Scsi_Host *host;
7865         struct ufs_hba *hba;
7866         int err = 0;
7867
7868         if (!dev) {
7869                 dev_err(dev,
7870                 "Invalid memory reference for dev is NULL\n");
7871                 err = -ENODEV;
7872                 goto out_error;
7873         }
7874
7875         host = scsi_host_alloc(&ufshcd_driver_template,
7876                                 sizeof(struct ufs_hba));
7877         if (!host) {
7878                 dev_err(dev, "scsi_host_alloc failed\n");
7879                 err = -ENOMEM;
7880                 goto out_error;
7881         }
7882         hba = shost_priv(host);
7883         hba->host = host;
7884         hba->dev = dev;
7885         *hba_handle = hba;
7886
7887         INIT_LIST_HEAD(&hba->clk_list_head);
7888
7889 out_error:
7890         return err;
7891 }
7892 EXPORT_SYMBOL(ufshcd_alloc_host);
7893
7894 /**
7895  * ufshcd_init - Driver initialization routine
7896  * @hba: per-adapter instance
7897  * @mmio_base: base register address
7898  * @irq: Interrupt line of device
7899  * Returns 0 on success, non-zero value on failure
7900  */
7901 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7902 {
7903         int err;
7904         struct Scsi_Host *host = hba->host;
7905         struct device *dev = hba->dev;
7906
7907         if (!mmio_base) {
7908                 dev_err(hba->dev,
7909                 "Invalid memory reference for mmio_base is NULL\n");
7910                 err = -ENODEV;
7911                 goto out_error;
7912         }
7913
7914         hba->mmio_base = mmio_base;
7915         hba->irq = irq;
7916
7917         /* Set descriptor lengths to specification defaults */
7918         ufshcd_def_desc_sizes(hba);
7919
7920         err = ufshcd_hba_init(hba);
7921         if (err)
7922                 goto out_error;
7923
7924         /* Read capabilities registers */
7925         ufshcd_hba_capabilities(hba);
7926
7927         /* Get UFS version supported by the controller */
7928         hba->ufs_version = ufshcd_get_ufs_version(hba);
7929
7930         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
7931             (hba->ufs_version != UFSHCI_VERSION_11) &&
7932             (hba->ufs_version != UFSHCI_VERSION_20) &&
7933             (hba->ufs_version != UFSHCI_VERSION_21))
7934                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
7935                         hba->ufs_version);
7936
7937         /* Get Interrupt bit mask per version */
7938         hba->intr_mask = ufshcd_get_intr_mask(hba);
7939
7940         err = ufshcd_set_dma_mask(hba);
7941         if (err) {
7942                 dev_err(hba->dev, "set dma mask failed\n");
7943                 goto out_disable;
7944         }
7945
7946         /* Allocate memory for host memory space */
7947         err = ufshcd_memory_alloc(hba);
7948         if (err) {
7949                 dev_err(hba->dev, "Memory allocation failed\n");
7950                 goto out_disable;
7951         }
7952
7953         /* Configure LRB */
7954         ufshcd_host_memory_configure(hba);
7955
7956         host->can_queue = hba->nutrs;
7957         host->cmd_per_lun = hba->nutrs;
7958         host->max_id = UFSHCD_MAX_ID;
7959         host->max_lun = UFS_MAX_LUNS;
7960         host->max_channel = UFSHCD_MAX_CHANNEL;
7961         host->unique_id = host->host_no;
7962         host->max_cmd_len = MAX_CDB_SIZE;
7963
7964         hba->max_pwr_info.is_valid = false;
7965
7966         /* Initialize wait queues for task management */
7967         init_waitqueue_head(&hba->tm_wq);
7968         init_waitqueue_head(&hba->tm_tag_wq);
7969
7970         /* Initialize work queues */
7971         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
7972         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7973
7974         /* Initialize UIC command mutex */
7975         mutex_init(&hba->uic_cmd_mutex);
7976
7977         /* Initialize mutex for device management commands */
7978         mutex_init(&hba->dev_cmd.lock);
7979
7980         init_rwsem(&hba->clk_scaling_lock);
7981
7982         /* Initialize device management tag acquire wait queue */
7983         init_waitqueue_head(&hba->dev_cmd.tag_wq);
7984
7985         ufshcd_init_clk_gating(hba);
7986
7987         /*
7988          * In order to avoid any spurious interrupt immediately after
7989          * registering UFS controller interrupt handler, clear any pending UFS
7990          * interrupt status and disable all the UFS interrupts.
7991          */
7992         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
7993                       REG_INTERRUPT_STATUS);
7994         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
7995         /*
7996          * Make sure that UFS interrupts are disabled and any pending interrupt
7997          * status is cleared before registering UFS interrupt handler.
7998          */
7999         mb();
8000
8001         /* IRQ registration */
8002         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8003         if (err) {
8004                 dev_err(hba->dev, "request irq failed\n");
8005                 goto exit_gating;
8006         } else {
8007                 hba->is_irq_enabled = true;
8008         }
8009
8010         err = scsi_add_host(host, hba->dev);
8011         if (err) {
8012                 dev_err(hba->dev, "scsi_add_host failed\n");
8013                 goto exit_gating;
8014         }
8015
8016         /* Host controller enable */
8017         err = ufshcd_hba_enable(hba);
8018         if (err) {
8019                 dev_err(hba->dev, "Host controller enable failed\n");
8020                 ufshcd_print_host_regs(hba);
8021                 ufshcd_print_host_state(hba);
8022                 goto out_remove_scsi_host;
8023         }
8024
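             /* Set up clock scaling work items, workqueue and sysfs node */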
8025         if (ufshcd_is_clkscaling_supported(hba)) {
8026                 char wq_name[sizeof("ufs_clkscaling_00")];
8027
8028                 INIT_WORK(&hba->clk_scaling.suspend_work,
8029                           ufshcd_clk_scaling_suspend_work);
8030                 INIT_WORK(&hba->clk_scaling.resume_work,
8031                           ufshcd_clk_scaling_resume_work);
8032
8033                 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
8034                          host->host_no);
8035                 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
8036
8037                 ufshcd_clkscaling_init_sysfs(hba);
8038         }
8039
8040         /*
8041          * Set the default power management level for runtime and system PM.
8042          * Default power saving mode is to keep UFS link in Hibern8 state
8043          * and UFS device in sleep state.
8044          */
8045         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8046                                                 UFS_SLEEP_PWR_MODE,
8047                                                 UIC_LINK_HIBERN8_STATE);
8048         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8049                                                 UFS_SLEEP_PWR_MODE,
8050                                                 UIC_LINK_HIBERN8_STATE);
8051
8052         /* Hold auto suspend until async scan completes */
8053         pm_runtime_get_sync(dev);
8054
8055         /*
8056          * We are assuming that the device wasn't put in sleep/power-down
8057          * state exclusively during the boot stage before the kernel.
8058          * This assumption helps avoid doing link startup twice during
8059          * ufshcd_probe_hba().
8060          */
8061         ufshcd_set_ufs_dev_active(hba);
8062
8063         async_schedule(ufshcd_async_scan, hba);
8064         ufshcd_add_sysfs_nodes(hba);
8065
8066         return 0;
8067
8068 out_remove_scsi_host:
8069         scsi_remove_host(hba->host);
8070 exit_gating:
8071         ufshcd_exit_clk_gating(hba);
8072 out_disable:
8073         hba->is_irq_enabled = false;
8074         ufshcd_hba_exit(hba);
8075 out_error:
8076         return err;
8077 }
8078 EXPORT_SYMBOL_GPL(ufshcd_init);
8079
8080 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8081 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8082 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8083 MODULE_LICENSE("GPL");
8084 MODULE_VERSION(UFSHCD_DRIVER_VERSION);