Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufshcd.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller driver Core
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include "ufshcd.h"
20 #include "ufs_quirks.h"
21 #include "unipro.h"
22 #include "ufs-sysfs.h"
23 #include "ufs_bsg.h"
24 #include "ufshcd-crypto.h"
25 #include <asm/unaligned.h>
26 #include <linux/blkdev.h>
27
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/ufs.h>
30
31 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
32                                  UTP_TASK_REQ_COMPL |\
33                                  UFSHCD_ERROR_MASK)
34 /* UIC command timeout, unit: ms */
35 #define UIC_CMD_TIMEOUT 500
36
37 /* NOP OUT retries waiting for NOP IN response */
38 #define NOP_OUT_RETRIES    10
39 /* Timeout after 50 msecs if NOP OUT hangs without response */
40 #define NOP_OUT_TIMEOUT    50 /* msecs */
41
42 /* Query request retries */
43 #define QUERY_REQ_RETRIES 3
44 /* Query request timeout */
45 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
46
47 /* Task management command timeout */
48 #define TM_CMD_TIMEOUT  100 /* msecs */
49
50 /* maximum number of retries for a general UIC command  */
51 #define UFS_UIC_COMMAND_RETRIES 3
52
53 /* maximum number of link-startup retries */
54 #define DME_LINKSTARTUP_RETRIES 3
55
56 /* Maximum retries for Hibern8 enter */
57 #define UIC_HIBERN8_ENTER_RETRIES 3
58
59 /* maximum number of reset retries before giving up */
60 #define MAX_HOST_RESET_RETRIES 5
61
62 /* Expose the flag value from utp_upiu_query.value */
63 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
64
65 /* Interrupt aggregation default timeout, unit: 40us */
66 #define INT_AGGR_DEF_TO 0x02
67
68 /* default delay of autosuspend: 2000 ms */
69 #define RPM_AUTOSUSPEND_DELAY_MS 2000
70
71 /* Default delay of RPM device flush delayed work */
72 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
73
74 /* Default value of wait time before gating device ref clock */
75 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
76
77 /* Polling time to wait for fDeviceInit */
78 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
79
80 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
81         ({                                                              \
82                 int _ret;                                               \
83                 if (_on)                                                \
84                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
85                 else                                                    \
86                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
87                 _ret;                                                   \
88         })
89
90 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
91         size_t __len = (len);                                            \
92         print_hex_dump(KERN_ERR, prefix_str,                             \
93                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
94                        16, 4, buf, __len, false);                        \
95 } while (0)
96
97 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
98                      const char *prefix)
99 {
100         u32 *regs;
101         size_t pos;
102
103         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
104                 return -EINVAL;
105
106         regs = kzalloc(len, GFP_ATOMIC);
107         if (!regs)
108                 return -ENOMEM;
109
110         for (pos = 0; pos < len; pos += 4)
111                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
112
113         ufshcd_hex_dump(prefix, regs, len);
114         kfree(regs);
115
116         return 0;
117 }
118 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
119
120 enum {
121         UFSHCD_MAX_CHANNEL      = 0,
122         UFSHCD_MAX_ID           = 1,
123         UFSHCD_CMD_PER_LUN      = 32,
124         UFSHCD_CAN_QUEUE        = 32,
125 };
126
127 /* UFSHCD states */
128 enum {
129         UFSHCD_STATE_RESET,
130         UFSHCD_STATE_ERROR,
131         UFSHCD_STATE_OPERATIONAL,
132         UFSHCD_STATE_EH_SCHEDULED_FATAL,
133         UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
134 };
135
136 /* UFSHCD error handling flags */
137 enum {
138         UFSHCD_EH_IN_PROGRESS = (1 << 0),
139 };
140
141 /* UFSHCD UIC layer error flags */
142 enum {
143         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
144         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
145         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
146         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
147         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
148         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
149         UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
150 };
151
152 #define ufshcd_set_eh_in_progress(h) \
153         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
154 #define ufshcd_eh_in_progress(h) \
155         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
156 #define ufshcd_clear_eh_in_progress(h) \
157         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
158
159 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
160         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
161         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
162         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
163         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
164         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
165         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
166         /*
167          * For DeepSleep, the link is first put in hibern8 and then off.
168          * Leaving the link in hibern8 is not supported.
169          */
170         {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
171 };
172
173 static inline enum ufs_dev_pwr_mode
174 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
175 {
176         return ufs_pm_lvl_states[lvl].dev_state;
177 }
178
179 static inline enum uic_link_state
180 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
181 {
182         return ufs_pm_lvl_states[lvl].link_state;
183 }
184
185 static inline enum ufs_pm_level
186 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
187                                         enum uic_link_state link_state)
188 {
189         enum ufs_pm_level lvl;
190
191         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
192                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
193                         (ufs_pm_lvl_states[lvl].link_state == link_state))
194                         return lvl;
195         }
196
197         /* if no match found, return the level 0 */
198         return UFS_PM_LVL_0;
199 }
200
201 static struct ufs_dev_fix ufs_fixups[] = {
202         /* UFS cards deviations table */
203         UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
204                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
205         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
206                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
207                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
208                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
209         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
210                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
211         UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
212                 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
213         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
214                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
215         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
216                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
217         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
218                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
219         END_FIX
220 };
221
222 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
223 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
224 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
225 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
226 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
227 static void ufshcd_hba_exit(struct ufs_hba *hba);
228 static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
229 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
230 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
231 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
232 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
233 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
234 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
235 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
236 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
237 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
238 static irqreturn_t ufshcd_intr(int irq, void *__hba);
239 static int ufshcd_change_power_mode(struct ufs_hba *hba,
240                              struct ufs_pa_layer_attr *pwr_mode);
241 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
242 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
243 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
244 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
245                                          struct ufs_vreg *vreg);
246 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
247 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
248 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
249 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
250 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
251 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
252 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
253 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
254
255 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
256 {
257         return tag >= 0 && tag < hba->nutrs;
258 }
259
260 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
261 {
262         if (!hba->is_irq_enabled) {
263                 enable_irq(hba->irq);
264                 hba->is_irq_enabled = true;
265         }
266 }
267
268 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
269 {
270         if (hba->is_irq_enabled) {
271                 disable_irq(hba->irq);
272                 hba->is_irq_enabled = false;
273         }
274 }
275
276 static inline void ufshcd_wb_config(struct ufs_hba *hba)
277 {
278         int ret;
279
280         if (!ufshcd_is_wb_allowed(hba))
281                 return;
282
283         ret = ufshcd_wb_ctrl(hba, true);
284         if (ret)
285                 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
286         else
287                 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
288         ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
289         if (ret)
290                 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
291                         __func__, ret);
292         ufshcd_wb_toggle_flush(hba, true);
293 }
294
295 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
296 {
297         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
298                 scsi_unblock_requests(hba->host);
299 }
300
301 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
302 {
303         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
304                 scsi_block_requests(hba->host);
305 }
306
307 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
308                 const char *str)
309 {
310         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
311
312         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
313 }
314
315 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
316                 const char *str)
317 {
318         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
319
320         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
321 }
322
323 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
324                 const char *str)
325 {
326         int off = (int)tag - hba->nutrs;
327         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
328
329         trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
330                         &descp->input_param1);
331 }
332
333 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
334                                          struct uic_command *ucmd,
335                                          const char *str)
336 {
337         u32 cmd;
338
339         if (!trace_ufshcd_uic_command_enabled())
340                 return;
341
342         if (!strcmp(str, "send"))
343                 cmd = ucmd->command;
344         else
345                 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
346
347         trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
348                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
349                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
350                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
351 }
352
353 static void ufshcd_add_command_trace(struct ufs_hba *hba,
354                 unsigned int tag, const char *str)
355 {
356         sector_t lba = -1;
357         u8 opcode = 0, group_id = 0;
358         u32 intr, doorbell;
359         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
360         struct scsi_cmnd *cmd = lrbp->cmd;
361         int transfer_len = -1;
362
363         if (!trace_ufshcd_command_enabled()) {
364                 /* trace UPIU W/O tracing command */
365                 if (cmd)
366                         ufshcd_add_cmd_upiu_trace(hba, tag, str);
367                 return;
368         }
369
370         if (cmd) { /* data phase exists */
371                 /* trace UPIU also */
372                 ufshcd_add_cmd_upiu_trace(hba, tag, str);
373                 opcode = cmd->cmnd[0];
374                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
375                         /*
376                          * Currently we only fully trace read(10) and write(10)
377                          * commands
378                          */
379                         if (cmd->request && cmd->request->bio)
380                                 lba = cmd->request->bio->bi_iter.bi_sector;
381                         transfer_len = be32_to_cpu(
382                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
383                         if (opcode == WRITE_10)
384                                 group_id = lrbp->cmd->cmnd[6];
385                 } else if (opcode == UNMAP) {
386                         if (cmd->request) {
387                                 lba = scsi_get_lba(cmd);
388                                 transfer_len = blk_rq_bytes(cmd->request);
389                         }
390                 }
391         }
392
393         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
394         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
395         trace_ufshcd_command(dev_name(hba->dev), str, tag,
396                         doorbell, transfer_len, intr, lba, opcode, group_id);
397 }
398
399 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
400 {
401         struct ufs_clk_info *clki;
402         struct list_head *head = &hba->clk_list_head;
403
404         if (list_empty(head))
405                 return;
406
407         list_for_each_entry(clki, head, list) {
408                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
409                                 clki->max_freq)
410                         dev_err(hba->dev, "clk: %s, rate: %u\n",
411                                         clki->name, clki->curr_freq);
412         }
413 }
414
415 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
416                              char *err_name)
417 {
418         int i;
419         bool found = false;
420         struct ufs_event_hist *e;
421
422         if (id >= UFS_EVT_CNT)
423                 return;
424
425         e = &hba->ufs_stats.event[id];
426
427         for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
428                 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
429
430                 if (e->tstamp[p] == 0)
431                         continue;
432                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
433                         e->val[p], ktime_to_us(e->tstamp[p]));
434                 found = true;
435         }
436
437         if (!found)
438                 dev_err(hba->dev, "No record of %s\n", err_name);
439 }
440
441 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
442 {
443         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
444
445         ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
446         ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
447         ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
448         ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
449         ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
450         ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
451                          "auto_hibern8_err");
452         ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
453         ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
454                          "link_startup_fail");
455         ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
456         ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
457                          "suspend_fail");
458         ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
459         ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
460         ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
461
462         ufshcd_vops_dbg_register_dump(hba);
463 }
464
465 static
466 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
467 {
468         struct ufshcd_lrb *lrbp;
469         int prdt_length;
470         int tag;
471
472         for_each_set_bit(tag, &bitmap, hba->nutrs) {
473                 lrbp = &hba->lrb[tag];
474
475                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
476                                 tag, ktime_to_us(lrbp->issue_time_stamp));
477                 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
478                                 tag, ktime_to_us(lrbp->compl_time_stamp));
479                 dev_err(hba->dev,
480                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
481                         tag, (u64)lrbp->utrd_dma_addr);
482
483                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
484                                 sizeof(struct utp_transfer_req_desc));
485                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
486                         (u64)lrbp->ucd_req_dma_addr);
487                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
488                                 sizeof(struct utp_upiu_req));
489                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
490                         (u64)lrbp->ucd_rsp_dma_addr);
491                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
492                                 sizeof(struct utp_upiu_rsp));
493
494                 prdt_length = le16_to_cpu(
495                         lrbp->utr_descriptor_ptr->prd_table_length);
496                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
497                         prdt_length /= sizeof(struct ufshcd_sg_entry);
498
499                 dev_err(hba->dev,
500                         "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
501                         tag, prdt_length,
502                         (u64)lrbp->ucd_prdt_dma_addr);
503
504                 if (pr_prdt)
505                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
506                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
507         }
508 }
509
510 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
511 {
512         int tag;
513
514         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
515                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
516
517                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
518                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
519         }
520 }
521
522 static void ufshcd_print_host_state(struct ufs_hba *hba)
523 {
524         struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
525
526         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
527         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
528                 hba->outstanding_reqs, hba->outstanding_tasks);
529         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
530                 hba->saved_err, hba->saved_uic_err);
531         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
532                 hba->curr_dev_pwr_mode, hba->uic_link_state);
533         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
534                 hba->pm_op_in_progress, hba->is_sys_suspended);
535         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
536                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
537         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
538         dev_err(hba->dev,
539                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
540                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
541                 hba->ufs_stats.hibern8_exit_cnt);
542         dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
543                 ktime_to_us(hba->ufs_stats.last_intr_ts),
544                 hba->ufs_stats.last_intr_status);
545         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
546                 hba->eh_flags, hba->req_abort_count);
547         dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
548                 hba->ufs_version, hba->capabilities, hba->caps);
549         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
550                 hba->dev_quirks);
551         if (sdev_ufs)
552                 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
553                         sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
554
555         ufshcd_print_clk_freqs(hba);
556 }
557
558 /**
559  * ufshcd_print_pwr_info - print power params as saved in hba
560  * power info
561  * @hba: per-adapter instance
562  */
563 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
564 {
565         static const char * const names[] = {
566                 "INVALID MODE",
567                 "FAST MODE",
568                 "SLOW_MODE",
569                 "INVALID MODE",
570                 "FASTAUTO_MODE",
571                 "SLOWAUTO_MODE",
572                 "INVALID MODE",
573         };
574
575         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
576                  __func__,
577                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
578                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
579                  names[hba->pwr_info.pwr_rx],
580                  names[hba->pwr_info.pwr_tx],
581                  hba->pwr_info.hs_rate);
582 }
583
584 static void ufshcd_device_reset(struct ufs_hba *hba)
585 {
586         int err;
587
588         err = ufshcd_vops_device_reset(hba);
589
590         if (!err) {
591                 ufshcd_set_ufs_dev_active(hba);
592                 if (ufshcd_is_wb_allowed(hba)) {
593                         hba->wb_enabled = false;
594                         hba->wb_buf_flush_enabled = false;
595                 }
596         }
597         if (err != -EOPNOTSUPP)
598                 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
599 }
600
601 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
602 {
603         if (!us)
604                 return;
605
606         if (us < 10)
607                 udelay(us);
608         else
609                 usleep_range(us, us + tolerance);
610 }
611 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
612
613 /**
614  * ufshcd_wait_for_register - wait for register value to change
615  * @hba: per-adapter interface
616  * @reg: mmio register offset
617  * @mask: mask to apply to the read register value
618  * @val: value to wait for
619  * @interval_us: polling interval in microseconds
620  * @timeout_ms: timeout in milliseconds
621  *
622  * Return:
623  * -ETIMEDOUT on error, zero on success.
624  */
625 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
626                                 u32 val, unsigned long interval_us,
627                                 unsigned long timeout_ms)
628 {
629         int err = 0;
630         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
631
632         /* ignore bits that we don't intend to wait on */
633         val = val & mask;
634
635         while ((ufshcd_readl(hba, reg) & mask) != val) {
636                 usleep_range(interval_us, interval_us + 50);
637                 if (time_after(jiffies, timeout)) {
638                         if ((ufshcd_readl(hba, reg) & mask) != val)
639                                 err = -ETIMEDOUT;
640                         break;
641                 }
642         }
643
644         return err;
645 }
646
647 /**
648  * ufshcd_get_intr_mask - Get the interrupt bit mask
649  * @hba: Pointer to adapter instance
650  *
651  * Returns interrupt bit mask per version
652  */
653 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
654 {
655         u32 intr_mask = 0;
656
657         switch (hba->ufs_version) {
658         case UFSHCI_VERSION_10:
659                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
660                 break;
661         case UFSHCI_VERSION_11:
662         case UFSHCI_VERSION_20:
663                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
664                 break;
665         case UFSHCI_VERSION_21:
666         default:
667                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
668                 break;
669         }
670
671         return intr_mask;
672 }
673
674 /**
675  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
676  * @hba: Pointer to adapter instance
677  *
678  * Returns UFSHCI version supported by the controller
679  */
680 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
681 {
682         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
683                 return ufshcd_vops_get_ufs_hci_version(hba);
684
685         return ufshcd_readl(hba, REG_UFS_VERSION);
686 }
687
688 /**
689  * ufshcd_is_device_present - Check if any device connected to
690  *                            the host controller
691  * @hba: pointer to adapter instance
692  *
693  * Returns true if device present, false if no device detected
694  */
695 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
696 {
697         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
698                                                 DEVICE_PRESENT) ? true : false;
699 }
700
701 /**
702  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
703  * @lrbp: pointer to local command reference block
704  *
705  * This function is used to get the OCS field from UTRD
706  * Returns the OCS field in the UTRD
707  */
708 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
709 {
710         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
711 }
712
713 /**
714  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
715  * @hba: per adapter instance
716  * @pos: position of the bit to be cleared
717  */
718 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
719 {
720         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
721                 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
722         else
723                 ufshcd_writel(hba, ~(1 << pos),
724                                 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
725 }
726
727 /**
728  * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
729  * @hba: per adapter instance
730  * @pos: position of the bit to be cleared
731  */
732 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
733 {
734         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
735                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
736         else
737                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
738 }
739
740 /**
741  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
742  * @hba: per adapter instance
743  * @tag: position of the bit to be cleared
744  */
745 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
746 {
747         __clear_bit(tag, &hba->outstanding_reqs);
748 }
749
750 /**
751  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
752  * @reg: Register value of host controller status
753  *
754  * Returns integer, 0 on Success and positive value if failed
755  */
756 static inline int ufshcd_get_lists_status(u32 reg)
757 {
758         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
759 }
760
761 /**
762  * ufshcd_get_uic_cmd_result - Get the UIC command result
763  * @hba: Pointer to adapter instance
764  *
765  * This function gets the result of UIC command completion
766  * Returns 0 on success, non zero value on error
767  */
768 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
769 {
770         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
771                MASK_UIC_COMMAND_RESULT;
772 }
773
774 /**
775  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
776  * @hba: Pointer to adapter instance
777  *
778  * This function gets UIC command argument3
779  * Returns 0 on success, non zero value on error
780  */
781 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
782 {
783         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
784 }
785
786 /**
787  * ufshcd_get_req_rsp - returns the TR response transaction type
788  * @ucd_rsp_ptr: pointer to response UPIU
789  */
790 static inline int
791 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
792 {
793         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
794 }
795
796 /**
797  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
798  * @ucd_rsp_ptr: pointer to response UPIU
799  *
800  * This function gets the response status and scsi_status from response UPIU
801  * Returns the response result code.
802  */
803 static inline int
804 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
805 {
806         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
807 }
808
809 /*
810  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
811  *                              from response UPIU
812  * @ucd_rsp_ptr: pointer to response UPIU
813  *
814  * Return the data segment length.
815  */
816 static inline unsigned int
817 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
818 {
819         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
820                 MASK_RSP_UPIU_DATA_SEG_LEN;
821 }
822
823 /**
824  * ufshcd_is_exception_event - Check if the device raised an exception event
825  * @ucd_rsp_ptr: pointer to response UPIU
826  *
827  * The function checks if the device raised an exception event indicated in
828  * the Device Information field of response UPIU.
829  *
830  * Returns true if exception is raised, false otherwise.
831  */
832 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
833 {
834         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
835                         MASK_RSP_EXCEPTION_EVENT ? true : false;
836 }
837
838 /**
839  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
840  * @hba: per adapter instance
841  */
842 static inline void
843 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
844 {
845         ufshcd_writel(hba, INT_AGGR_ENABLE |
846                       INT_AGGR_COUNTER_AND_TIMER_RESET,
847                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
848 }
849
850 /**
851  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
852  * @hba: per adapter instance
853  * @cnt: Interrupt aggregation counter threshold
854  * @tmout: Interrupt aggregation timeout value
855  */
856 static inline void
857 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
858 {
859         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
860                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
861                       INT_AGGR_TIMEOUT_VAL(tmout),
862                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
863 }
864
865 /**
866  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
867  * @hba: per adapter instance
868  */
869 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
870 {
871         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
872 }
873
874 /**
875  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
876  *                      When run-stop registers are set to 1, it indicates the
877  *                      host controller that it can process the requests
878  * @hba: per adapter instance
879  */
880 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
881 {
882         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
883                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
884         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
885                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
886 }
887
888 /**
889  * ufshcd_hba_start - Start controller initialization sequence
890  * @hba: per adapter instance
891  */
892 static inline void ufshcd_hba_start(struct ufs_hba *hba)
893 {
894         u32 val = CONTROLLER_ENABLE;
895
896         if (ufshcd_crypto_enable(hba))
897                 val |= CRYPTO_GENERAL_ENABLE;
898
899         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
900 }
901
902 /**
903  * ufshcd_is_hba_active - Get controller state
904  * @hba: per adapter instance
905  *
906  * Returns false if controller is active, true otherwise
907  */
908 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
909 {
910         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
911                 ? false : true;
912 }
913
914 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
915 {
916         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
917         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
918             (hba->ufs_version == UFSHCI_VERSION_11))
919                 return UFS_UNIPRO_VER_1_41;
920         else
921                 return UFS_UNIPRO_VER_1_6;
922 }
923 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
924
925 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
926 {
927         /*
928          * If both host and device support UniPro ver1.6 or later, PA layer
929          * parameters tuning happens during link startup itself.
930          *
931          * We can manually tune PA layer parameters if either host or device
932          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
933          * logic simple, we will only do manual tuning if local unipro version
934          * doesn't support ver1.6 or later.
935          */
936         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
937                 return true;
938         else
939                 return false;
940 }
941
942 /**
943  * ufshcd_set_clk_freq - set UFS controller clock frequencies
944  * @hba: per adapter instance
945  * @scale_up: If True, set max possible frequency othewise set low frequency
946  *
947  * Returns 0 if successful
948  * Returns < 0 for any other errors
949  */
950 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
951 {
952         int ret = 0;
953         struct ufs_clk_info *clki;
954         struct list_head *head = &hba->clk_list_head;
955
956         if (list_empty(head))
957                 goto out;
958
959         list_for_each_entry(clki, head, list) {
960                 if (!IS_ERR_OR_NULL(clki->clk)) {
961                         if (scale_up && clki->max_freq) {
962                                 if (clki->curr_freq == clki->max_freq)
963                                         continue;
964
965                                 ret = clk_set_rate(clki->clk, clki->max_freq);
966                                 if (ret) {
967                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
968                                                 __func__, clki->name,
969                                                 clki->max_freq, ret);
970                                         break;
971                                 }
972                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
973                                                 "scaled up", clki->name,
974                                                 clki->curr_freq,
975                                                 clki->max_freq);
976
977                                 clki->curr_freq = clki->max_freq;
978
979                         } else if (!scale_up && clki->min_freq) {
980                                 if (clki->curr_freq == clki->min_freq)
981                                         continue;
982
983                                 ret = clk_set_rate(clki->clk, clki->min_freq);
984                                 if (ret) {
985                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
986                                                 __func__, clki->name,
987                                                 clki->min_freq, ret);
988                                         break;
989                                 }
990                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
991                                                 "scaled down", clki->name,
992                                                 clki->curr_freq,
993                                                 clki->min_freq);
994                                 clki->curr_freq = clki->min_freq;
995                         }
996                 }
997                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
998                                 clki->name, clk_get_rate(clki->clk));
999         }
1000
1001 out:
1002         return ret;
1003 }
1004
1005 /**
1006  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1007  * @hba: per adapter instance
1008  * @scale_up: True if scaling up and false if scaling down
1009  *
1010  * Returns 0 if successful
1011  * Returns < 0 for any other errors
1012  */
1013 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1014 {
1015         int ret = 0;
1016         ktime_t start = ktime_get();
1017
1018         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1019         if (ret)
1020                 goto out;
1021
1022         ret = ufshcd_set_clk_freq(hba, scale_up);
1023         if (ret)
1024                 goto out;
1025
1026         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1027         if (ret)
1028                 ufshcd_set_clk_freq(hba, !scale_up);
1029
1030 out:
1031         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1032                         (scale_up ? "up" : "down"),
1033                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1034         return ret;
1035 }
1036
1037 /**
1038  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1039  * @hba: per adapter instance
1040  * @scale_up: True if scaling up and false if scaling down
1041  *
1042  * Returns true if scaling is required, false otherwise.
1043  */
1044 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1045                                                bool scale_up)
1046 {
1047         struct ufs_clk_info *clki;
1048         struct list_head *head = &hba->clk_list_head;
1049
1050         if (list_empty(head))
1051                 return false;
1052
1053         list_for_each_entry(clki, head, list) {
1054                 if (!IS_ERR_OR_NULL(clki->clk)) {
1055                         if (scale_up && clki->max_freq) {
1056                                 if (clki->curr_freq == clki->max_freq)
1057                                         continue;
1058                                 return true;
1059                         } else if (!scale_up && clki->min_freq) {
1060                                 if (clki->curr_freq == clki->min_freq)
1061                                         continue;
1062                                 return true;
1063                         }
1064                 }
1065         }
1066
1067         return false;
1068 }
1069
1070 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1071                                         u64 wait_timeout_us)
1072 {
1073         unsigned long flags;
1074         int ret = 0;
1075         u32 tm_doorbell;
1076         u32 tr_doorbell;
1077         bool timeout = false, do_last_check = false;
1078         ktime_t start;
1079
1080         ufshcd_hold(hba, false);
1081         spin_lock_irqsave(hba->host->host_lock, flags);
1082         /*
1083          * Wait for all the outstanding tasks/transfer requests.
1084          * Verify by checking the doorbell registers are clear.
1085          */
1086         start = ktime_get();
1087         do {
1088                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1089                         ret = -EBUSY;
1090                         goto out;
1091                 }
1092
1093                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1094                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1095                 if (!tm_doorbell && !tr_doorbell) {
1096                         timeout = false;
1097                         break;
1098                 } else if (do_last_check) {
1099                         break;
1100                 }
1101
1102                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1103                 schedule();
1104                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1105                     wait_timeout_us) {
1106                         timeout = true;
1107                         /*
1108                          * We might have scheduled out for long time so make
1109                          * sure to check if doorbells are cleared by this time
1110                          * or not.
1111                          */
1112                         do_last_check = true;
1113                 }
1114                 spin_lock_irqsave(hba->host->host_lock, flags);
1115         } while (tm_doorbell || tr_doorbell);
1116
1117         if (timeout) {
1118                 dev_err(hba->dev,
1119                         "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1120                         __func__, tm_doorbell, tr_doorbell);
1121                 ret = -EBUSY;
1122         }
1123 out:
1124         spin_unlock_irqrestore(hba->host->host_lock, flags);
1125         ufshcd_release(hba);
1126         return ret;
1127 }
1128
1129 /**
1130  * ufshcd_scale_gear - scale up/down UFS gear
1131  * @hba: per adapter instance
1132  * @scale_up: True for scaling up gear and false for scaling down
1133  *
1134  * Returns 0 for success,
1135  * Returns -EBUSY if scaling can't happen at this time
1136  * Returns non-zero for any other errors
1137  */
1138 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1139 {
1140         int ret = 0;
1141         struct ufs_pa_layer_attr new_pwr_info;
1142
1143         if (scale_up) {
1144                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1145                        sizeof(struct ufs_pa_layer_attr));
1146         } else {
1147                 memcpy(&new_pwr_info, &hba->pwr_info,
1148                        sizeof(struct ufs_pa_layer_attr));
1149
1150                 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1151                     hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1152                         /* save the current power mode */
1153                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
1154                                 &hba->pwr_info,
1155                                 sizeof(struct ufs_pa_layer_attr));
1156
1157                         /* scale down gear */
1158                         new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1159                         new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1160                 }
1161         }
1162
1163         /* check if the power mode needs to be changed or not? */
1164         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1165         if (ret)
1166                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1167                         __func__, ret,
1168                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1169                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1170
1171         return ret;
1172 }
1173
1174 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1175 {
1176         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
1177         int ret = 0;
1178         /*
1179          * make sure that there are no outstanding requests when
1180          * clock scaling is in progress
1181          */
1182         ufshcd_scsi_block_requests(hba);
1183         down_write(&hba->clk_scaling_lock);
1184         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1185                 ret = -EBUSY;
1186                 up_write(&hba->clk_scaling_lock);
1187                 ufshcd_scsi_unblock_requests(hba);
1188         }
1189
1190         return ret;
1191 }
1192
1193 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1194 {
1195         up_write(&hba->clk_scaling_lock);
1196         ufshcd_scsi_unblock_requests(hba);
1197 }
1198
1199 /**
1200  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1201  * @hba: per adapter instance
1202  * @scale_up: True for scaling up and false for scalin down
1203  *
1204  * Returns 0 for success,
1205  * Returns -EBUSY if scaling can't happen at this time
1206  * Returns non-zero for any other errors
1207  */
1208 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1209 {
1210         int ret = 0;
1211
1212         /* let's not get into low power until clock scaling is completed */
1213         ufshcd_hold(hba, false);
1214
1215         ret = ufshcd_clock_scaling_prepare(hba);
1216         if (ret)
1217                 goto out;
1218
1219         /* scale down the gear before scaling down clocks */
1220         if (!scale_up) {
1221                 ret = ufshcd_scale_gear(hba, false);
1222                 if (ret)
1223                         goto out_unprepare;
1224         }
1225
1226         ret = ufshcd_scale_clks(hba, scale_up);
1227         if (ret) {
1228                 if (!scale_up)
1229                         ufshcd_scale_gear(hba, true);
1230                 goto out_unprepare;
1231         }
1232
1233         /* scale up the gear after scaling up clocks */
1234         if (scale_up) {
1235                 ret = ufshcd_scale_gear(hba, true);
1236                 if (ret) {
1237                         ufshcd_scale_clks(hba, false);
1238                         goto out_unprepare;
1239                 }
1240         }
1241
1242         /* Enable Write Booster if we have scaled up else disable it */
1243         up_write(&hba->clk_scaling_lock);
1244         ufshcd_wb_ctrl(hba, scale_up);
1245         down_write(&hba->clk_scaling_lock);
1246
1247 out_unprepare:
1248         ufshcd_clock_scaling_unprepare(hba);
1249 out:
1250         ufshcd_release(hba);
1251         return ret;
1252 }
1253
1254 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1255 {
1256         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1257                                            clk_scaling.suspend_work);
1258         unsigned long irq_flags;
1259
1260         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1261         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1262                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1263                 return;
1264         }
1265         hba->clk_scaling.is_suspended = true;
1266         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1267
1268         __ufshcd_suspend_clkscaling(hba);
1269 }
1270
1271 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1272 {
1273         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1274                                            clk_scaling.resume_work);
1275         unsigned long irq_flags;
1276
1277         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1278         if (!hba->clk_scaling.is_suspended) {
1279                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1280                 return;
1281         }
1282         hba->clk_scaling.is_suspended = false;
1283         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1284
1285         devfreq_resume_device(hba->devfreq);
1286 }
1287
1288 static int ufshcd_devfreq_target(struct device *dev,
1289                                 unsigned long *freq, u32 flags)
1290 {
1291         int ret = 0;
1292         struct ufs_hba *hba = dev_get_drvdata(dev);
1293         ktime_t start;
1294         bool scale_up, sched_clk_scaling_suspend_work = false;
1295         struct list_head *clk_list = &hba->clk_list_head;
1296         struct ufs_clk_info *clki;
1297         unsigned long irq_flags;
1298
1299         if (!ufshcd_is_clkscaling_supported(hba))
1300                 return -EINVAL;
1301
1302         clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1303         /* Override with the closest supported frequency */
1304         *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1305         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1306         if (ufshcd_eh_in_progress(hba)) {
1307                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1308                 return 0;
1309         }
1310
1311         if (!hba->clk_scaling.active_reqs)
1312                 sched_clk_scaling_suspend_work = true;
1313
1314         if (list_empty(clk_list)) {
1315                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1316                 goto out;
1317         }
1318
1319         /* Decide based on the rounded-off frequency and update */
1320         scale_up = (*freq == clki->max_freq) ? true : false;
1321         if (!scale_up)
1322                 *freq = clki->min_freq;
1323         /* Update the frequency */
1324         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1325                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1326                 ret = 0;
1327                 goto out; /* no state change required */
1328         }
1329         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1330
1331         pm_runtime_get_noresume(hba->dev);
1332         if (!pm_runtime_active(hba->dev)) {
1333                 pm_runtime_put_noidle(hba->dev);
1334                 ret = -EAGAIN;
1335                 goto out;
1336         }
1337         start = ktime_get();
1338         ret = ufshcd_devfreq_scale(hba, scale_up);
1339         pm_runtime_put(hba->dev);
1340
1341         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1342                 (scale_up ? "up" : "down"),
1343                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1344
1345 out:
1346         if (sched_clk_scaling_suspend_work)
1347                 queue_work(hba->clk_scaling.workq,
1348                            &hba->clk_scaling.suspend_work);
1349
1350         return ret;
1351 }
1352
1353 static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1354 {
1355         int *busy = priv;
1356
1357         WARN_ON_ONCE(reserved);
1358         (*busy)++;
1359         return false;
1360 }
1361
1362 /* Whether or not any tag is in use by a request that is in progress. */
1363 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1364 {
1365         struct request_queue *q = hba->cmd_queue;
1366         int busy = 0;
1367
1368         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1369         return busy;
1370 }
1371
1372 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1373                 struct devfreq_dev_status *stat)
1374 {
1375         struct ufs_hba *hba = dev_get_drvdata(dev);
1376         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1377         unsigned long flags;
1378         struct list_head *clk_list = &hba->clk_list_head;
1379         struct ufs_clk_info *clki;
1380         ktime_t curr_t;
1381
1382         if (!ufshcd_is_clkscaling_supported(hba))
1383                 return -EINVAL;
1384
1385         memset(stat, 0, sizeof(*stat));
1386
1387         spin_lock_irqsave(hba->host->host_lock, flags);
1388         curr_t = ktime_get();
1389         if (!scaling->window_start_t)
1390                 goto start_window;
1391
1392         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1393         /*
1394          * If current frequency is 0, then the ondemand governor considers
1395          * there's no initial frequency set. And it always requests to set
1396          * to max. frequency.
1397          */
1398         stat->current_frequency = clki->curr_freq;
1399         if (scaling->is_busy_started)
1400                 scaling->tot_busy_t += ktime_us_delta(curr_t,
1401                                 scaling->busy_start_t);
1402
1403         stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1404         stat->busy_time = scaling->tot_busy_t;
1405 start_window:
1406         scaling->window_start_t = curr_t;
1407         scaling->tot_busy_t = 0;
1408
1409         if (hba->outstanding_reqs) {
1410                 scaling->busy_start_t = curr_t;
1411                 scaling->is_busy_started = true;
1412         } else {
1413                 scaling->busy_start_t = 0;
1414                 scaling->is_busy_started = false;
1415         }
1416         spin_unlock_irqrestore(hba->host->host_lock, flags);
1417         return 0;
1418 }
1419
1420 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1421 {
1422         struct list_head *clk_list = &hba->clk_list_head;
1423         struct ufs_clk_info *clki;
1424         struct devfreq *devfreq;
1425         int ret;
1426
1427         /* Skip devfreq if we don't have any clocks in the list */
1428         if (list_empty(clk_list))
1429                 return 0;
1430
1431         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1432         dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1433         dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1434
1435         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1436                                          &hba->vps->ondemand_data);
1437         devfreq = devfreq_add_device(hba->dev,
1438                         &hba->vps->devfreq_profile,
1439                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1440                         &hba->vps->ondemand_data);
1441         if (IS_ERR(devfreq)) {
1442                 ret = PTR_ERR(devfreq);
1443                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1444
1445                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1446                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1447                 return ret;
1448         }
1449
1450         hba->devfreq = devfreq;
1451
1452         return 0;
1453 }
1454
1455 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1456 {
1457         struct list_head *clk_list = &hba->clk_list_head;
1458         struct ufs_clk_info *clki;
1459
1460         if (!hba->devfreq)
1461                 return;
1462
1463         devfreq_remove_device(hba->devfreq);
1464         hba->devfreq = NULL;
1465
1466         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1467         dev_pm_opp_remove(hba->dev, clki->min_freq);
1468         dev_pm_opp_remove(hba->dev, clki->max_freq);
1469 }
1470
1471 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1472 {
1473         unsigned long flags;
1474
1475         devfreq_suspend_device(hba->devfreq);
1476         spin_lock_irqsave(hba->host->host_lock, flags);
1477         hba->clk_scaling.window_start_t = 0;
1478         spin_unlock_irqrestore(hba->host->host_lock, flags);
1479 }
1480
1481 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1482 {
1483         unsigned long flags;
1484         bool suspend = false;
1485
1486         if (!ufshcd_is_clkscaling_supported(hba))
1487                 return;
1488
1489         spin_lock_irqsave(hba->host->host_lock, flags);
1490         if (!hba->clk_scaling.is_suspended) {
1491                 suspend = true;
1492                 hba->clk_scaling.is_suspended = true;
1493         }
1494         spin_unlock_irqrestore(hba->host->host_lock, flags);
1495
1496         if (suspend)
1497                 __ufshcd_suspend_clkscaling(hba);
1498 }
1499
1500 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1501 {
1502         unsigned long flags;
1503         bool resume = false;
1504
1505         if (!ufshcd_is_clkscaling_supported(hba))
1506                 return;
1507
1508         spin_lock_irqsave(hba->host->host_lock, flags);
1509         if (hba->clk_scaling.is_suspended) {
1510                 resume = true;
1511                 hba->clk_scaling.is_suspended = false;
1512         }
1513         spin_unlock_irqrestore(hba->host->host_lock, flags);
1514
1515         if (resume)
1516                 devfreq_resume_device(hba->devfreq);
1517 }
1518
1519 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1520                 struct device_attribute *attr, char *buf)
1521 {
1522         struct ufs_hba *hba = dev_get_drvdata(dev);
1523
1524         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1525 }
1526
1527 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1528                 struct device_attribute *attr, const char *buf, size_t count)
1529 {
1530         struct ufs_hba *hba = dev_get_drvdata(dev);
1531         u32 value;
1532         int err;
1533
1534         if (kstrtou32(buf, 0, &value))
1535                 return -EINVAL;
1536
1537         value = !!value;
1538         if (value == hba->clk_scaling.is_allowed)
1539                 goto out;
1540
1541         pm_runtime_get_sync(hba->dev);
1542         ufshcd_hold(hba, false);
1543
1544         cancel_work_sync(&hba->clk_scaling.suspend_work);
1545         cancel_work_sync(&hba->clk_scaling.resume_work);
1546
1547         hba->clk_scaling.is_allowed = value;
1548
1549         if (value) {
1550                 ufshcd_resume_clkscaling(hba);
1551         } else {
1552                 ufshcd_suspend_clkscaling(hba);
1553                 err = ufshcd_devfreq_scale(hba, true);
1554                 if (err)
1555                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1556                                         __func__, err);
1557         }
1558
1559         ufshcd_release(hba);
1560         pm_runtime_put_sync(hba->dev);
1561 out:
1562         return count;
1563 }
1564
1565 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1566 {
1567         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1568         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1569         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1570         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1571         hba->clk_scaling.enable_attr.attr.mode = 0644;
1572         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1573                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1574 }
1575
1576 static void ufshcd_ungate_work(struct work_struct *work)
1577 {
1578         int ret;
1579         unsigned long flags;
1580         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1581                         clk_gating.ungate_work);
1582
1583         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1584
1585         spin_lock_irqsave(hba->host->host_lock, flags);
1586         if (hba->clk_gating.state == CLKS_ON) {
1587                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1588                 goto unblock_reqs;
1589         }
1590
1591         spin_unlock_irqrestore(hba->host->host_lock, flags);
1592         ufshcd_hba_vreg_set_hpm(hba);
1593         ufshcd_setup_clocks(hba, true);
1594
1595         ufshcd_enable_irq(hba);
1596
1597         /* Exit from hibern8 */
1598         if (ufshcd_can_hibern8_during_gating(hba)) {
1599                 /* Prevent gating in this path */
1600                 hba->clk_gating.is_suspended = true;
1601                 if (ufshcd_is_link_hibern8(hba)) {
1602                         ret = ufshcd_uic_hibern8_exit(hba);
1603                         if (ret)
1604                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1605                                         __func__, ret);
1606                         else
1607                                 ufshcd_set_link_active(hba);
1608                 }
1609                 hba->clk_gating.is_suspended = false;
1610         }
1611 unblock_reqs:
1612         ufshcd_scsi_unblock_requests(hba);
1613 }
1614
1615 /**
1616  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1617  * Also, exit from hibern8 mode and set the link as active.
1618  * @hba: per adapter instance
1619  * @async: This indicates whether caller should ungate clocks asynchronously.
1620  */
1621 int ufshcd_hold(struct ufs_hba *hba, bool async)
1622 {
1623         int rc = 0;
1624         bool flush_result;
1625         unsigned long flags;
1626
1627         if (!ufshcd_is_clkgating_allowed(hba))
1628                 goto out;
1629         spin_lock_irqsave(hba->host->host_lock, flags);
1630         hba->clk_gating.active_reqs++;
1631
1632 start:
1633         switch (hba->clk_gating.state) {
1634         case CLKS_ON:
1635                 /*
1636                  * Wait for the ungate work to complete if in progress.
1637                  * Though the clocks may be in ON state, the link could
1638                  * still be in hibner8 state if hibern8 is allowed
1639                  * during clock gating.
1640                  * Make sure we exit hibern8 state also in addition to
1641                  * clocks being ON.
1642                  */
1643                 if (ufshcd_can_hibern8_during_gating(hba) &&
1644                     ufshcd_is_link_hibern8(hba)) {
1645                         if (async) {
1646                                 rc = -EAGAIN;
1647                                 hba->clk_gating.active_reqs--;
1648                                 break;
1649                         }
1650                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1651                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1652                         if (hba->clk_gating.is_suspended && !flush_result)
1653                                 goto out;
1654                         spin_lock_irqsave(hba->host->host_lock, flags);
1655                         goto start;
1656                 }
1657                 break;
1658         case REQ_CLKS_OFF:
1659                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1660                         hba->clk_gating.state = CLKS_ON;
1661                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1662                                                 hba->clk_gating.state);
1663                         break;
1664                 }
1665                 /*
1666                  * If we are here, it means gating work is either done or
1667                  * currently running. Hence, fall through to cancel gating
1668                  * work and to enable clocks.
1669                  */
1670                 fallthrough;
1671         case CLKS_OFF:
1672                 hba->clk_gating.state = REQ_CLKS_ON;
1673                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1674                                         hba->clk_gating.state);
1675                 if (queue_work(hba->clk_gating.clk_gating_workq,
1676                                &hba->clk_gating.ungate_work))
1677                         ufshcd_scsi_block_requests(hba);
1678                 /*
1679                  * fall through to check if we should wait for this
1680                  * work to be done or not.
1681                  */
1682                 fallthrough;
1683         case REQ_CLKS_ON:
1684                 if (async) {
1685                         rc = -EAGAIN;
1686                         hba->clk_gating.active_reqs--;
1687                         break;
1688                 }
1689
1690                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1691                 flush_work(&hba->clk_gating.ungate_work);
1692                 /* Make sure state is CLKS_ON before returning */
1693                 spin_lock_irqsave(hba->host->host_lock, flags);
1694                 goto start;
1695         default:
1696                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1697                                 __func__, hba->clk_gating.state);
1698                 break;
1699         }
1700         spin_unlock_irqrestore(hba->host->host_lock, flags);
1701 out:
1702         return rc;
1703 }
1704 EXPORT_SYMBOL_GPL(ufshcd_hold);
1705
1706 static void ufshcd_gate_work(struct work_struct *work)
1707 {
1708         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1709                         clk_gating.gate_work.work);
1710         unsigned long flags;
1711         int ret;
1712
1713         spin_lock_irqsave(hba->host->host_lock, flags);
1714         /*
1715          * In case you are here to cancel this work the gating state
1716          * would be marked as REQ_CLKS_ON. In this case save time by
1717          * skipping the gating work and exit after changing the clock
1718          * state to CLKS_ON.
1719          */
1720         if (hba->clk_gating.is_suspended ||
1721                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1722                 hba->clk_gating.state = CLKS_ON;
1723                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1724                                         hba->clk_gating.state);
1725                 goto rel_lock;
1726         }
1727
1728         if (hba->clk_gating.active_reqs
1729                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1730                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1731                 || hba->active_uic_cmd || hba->uic_async_done)
1732                 goto rel_lock;
1733
1734         spin_unlock_irqrestore(hba->host->host_lock, flags);
1735
1736         /* put the link into hibern8 mode before turning off clocks */
1737         if (ufshcd_can_hibern8_during_gating(hba)) {
1738                 ret = ufshcd_uic_hibern8_enter(hba);
1739                 if (ret) {
1740                         hba->clk_gating.state = CLKS_ON;
1741                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1742                                         __func__, ret);
1743                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1744                                                 hba->clk_gating.state);
1745                         goto out;
1746                 }
1747                 ufshcd_set_link_hibern8(hba);
1748         }
1749
1750         ufshcd_disable_irq(hba);
1751
1752         ufshcd_setup_clocks(hba, false);
1753
1754         /* Put the host controller in low power mode if possible */
1755         ufshcd_hba_vreg_set_lpm(hba);
1756         /*
1757          * In case you are here to cancel this work the gating state
1758          * would be marked as REQ_CLKS_ON. In this case keep the state
1759          * as REQ_CLKS_ON which would anyway imply that clocks are off
1760          * and a request to turn them on is pending. By doing this way,
1761          * we keep the state machine in tact and this would ultimately
1762          * prevent from doing cancel work multiple times when there are
1763          * new requests arriving before the current cancel work is done.
1764          */
1765         spin_lock_irqsave(hba->host->host_lock, flags);
1766         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1767                 hba->clk_gating.state = CLKS_OFF;
1768                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1769                                         hba->clk_gating.state);
1770         }
1771 rel_lock:
1772         spin_unlock_irqrestore(hba->host->host_lock, flags);
1773 out:
1774         return;
1775 }
1776
1777 /* host lock must be held before calling this variant */
1778 static void __ufshcd_release(struct ufs_hba *hba)
1779 {
1780         if (!ufshcd_is_clkgating_allowed(hba))
1781                 return;
1782
1783         hba->clk_gating.active_reqs--;
1784
1785         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1786             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1787             hba->outstanding_tasks ||
1788             hba->active_uic_cmd || hba->uic_async_done ||
1789             hba->clk_gating.state == CLKS_OFF)
1790                 return;
1791
1792         hba->clk_gating.state = REQ_CLKS_OFF;
1793         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1794         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1795                            &hba->clk_gating.gate_work,
1796                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1797 }
1798
1799 void ufshcd_release(struct ufs_hba *hba)
1800 {
1801         unsigned long flags;
1802
1803         spin_lock_irqsave(hba->host->host_lock, flags);
1804         __ufshcd_release(hba);
1805         spin_unlock_irqrestore(hba->host->host_lock, flags);
1806 }
1807 EXPORT_SYMBOL_GPL(ufshcd_release);
1808
1809 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1810                 struct device_attribute *attr, char *buf)
1811 {
1812         struct ufs_hba *hba = dev_get_drvdata(dev);
1813
1814         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1815 }
1816
1817 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1818                 struct device_attribute *attr, const char *buf, size_t count)
1819 {
1820         struct ufs_hba *hba = dev_get_drvdata(dev);
1821         unsigned long flags, value;
1822
1823         if (kstrtoul(buf, 0, &value))
1824                 return -EINVAL;
1825
1826         spin_lock_irqsave(hba->host->host_lock, flags);
1827         hba->clk_gating.delay_ms = value;
1828         spin_unlock_irqrestore(hba->host->host_lock, flags);
1829         return count;
1830 }
1831
1832 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1833                 struct device_attribute *attr, char *buf)
1834 {
1835         struct ufs_hba *hba = dev_get_drvdata(dev);
1836
1837         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1838 }
1839
1840 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1841                 struct device_attribute *attr, const char *buf, size_t count)
1842 {
1843         struct ufs_hba *hba = dev_get_drvdata(dev);
1844         unsigned long flags;
1845         u32 value;
1846
1847         if (kstrtou32(buf, 0, &value))
1848                 return -EINVAL;
1849
1850         value = !!value;
1851
1852         spin_lock_irqsave(hba->host->host_lock, flags);
1853         if (value == hba->clk_gating.is_enabled)
1854                 goto out;
1855
1856         if (value)
1857                 __ufshcd_release(hba);
1858         else
1859                 hba->clk_gating.active_reqs++;
1860
1861         hba->clk_gating.is_enabled = value;
1862 out:
1863         spin_unlock_irqrestore(hba->host->host_lock, flags);
1864         return count;
1865 }
1866
1867 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1868 {
1869         char wq_name[sizeof("ufs_clkscaling_00")];
1870
1871         if (!ufshcd_is_clkscaling_supported(hba))
1872                 return;
1873
1874         if (!hba->clk_scaling.min_gear)
1875                 hba->clk_scaling.min_gear = UFS_HS_G1;
1876
1877         INIT_WORK(&hba->clk_scaling.suspend_work,
1878                   ufshcd_clk_scaling_suspend_work);
1879         INIT_WORK(&hba->clk_scaling.resume_work,
1880                   ufshcd_clk_scaling_resume_work);
1881
1882         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1883                  hba->host->host_no);
1884         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1885
1886         ufshcd_clkscaling_init_sysfs(hba);
1887 }
1888
1889 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1890 {
1891         if (!ufshcd_is_clkscaling_supported(hba))
1892                 return;
1893
1894         destroy_workqueue(hba->clk_scaling.workq);
1895         ufshcd_devfreq_remove(hba);
1896 }
1897
1898 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1899 {
1900         char wq_name[sizeof("ufs_clk_gating_00")];
1901
1902         if (!ufshcd_is_clkgating_allowed(hba))
1903                 return;
1904
1905         hba->clk_gating.state = CLKS_ON;
1906
1907         hba->clk_gating.delay_ms = 150;
1908         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1909         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1910
1911         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1912                  hba->host->host_no);
1913         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1914                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
1915
1916         hba->clk_gating.is_enabled = true;
1917
1918         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1919         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1920         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1921         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1922         hba->clk_gating.delay_attr.attr.mode = 0644;
1923         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1924                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1925
1926         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1927         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1928         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1929         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1930         hba->clk_gating.enable_attr.attr.mode = 0644;
1931         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1932                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1933 }
1934
1935 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1936 {
1937         if (!ufshcd_is_clkgating_allowed(hba))
1938                 return;
1939         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1940         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1941         cancel_work_sync(&hba->clk_gating.ungate_work);
1942         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1943         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1944 }
1945
1946 /* Must be called with host lock acquired */
1947 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1948 {
1949         bool queue_resume_work = false;
1950         ktime_t curr_t = ktime_get();
1951
1952         if (!ufshcd_is_clkscaling_supported(hba))
1953                 return;
1954
1955         if (!hba->clk_scaling.active_reqs++)
1956                 queue_resume_work = true;
1957
1958         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1959                 return;
1960
1961         if (queue_resume_work)
1962                 queue_work(hba->clk_scaling.workq,
1963                            &hba->clk_scaling.resume_work);
1964
1965         if (!hba->clk_scaling.window_start_t) {
1966                 hba->clk_scaling.window_start_t = curr_t;
1967                 hba->clk_scaling.tot_busy_t = 0;
1968                 hba->clk_scaling.is_busy_started = false;
1969         }
1970
1971         if (!hba->clk_scaling.is_busy_started) {
1972                 hba->clk_scaling.busy_start_t = curr_t;
1973                 hba->clk_scaling.is_busy_started = true;
1974         }
1975 }
1976
1977 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1978 {
1979         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1980
1981         if (!ufshcd_is_clkscaling_supported(hba))
1982                 return;
1983
1984         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1985                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1986                                         scaling->busy_start_t));
1987                 scaling->busy_start_t = 0;
1988                 scaling->is_busy_started = false;
1989         }
1990 }
1991 /**
1992  * ufshcd_send_command - Send SCSI or device management commands
1993  * @hba: per adapter instance
1994  * @task_tag: Task tag of the command
1995  */
1996 static inline
1997 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1998 {
1999         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2000
2001         lrbp->issue_time_stamp = ktime_get();
2002         lrbp->compl_time_stamp = ktime_set(0, 0);
2003         ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
2004         ufshcd_add_command_trace(hba, task_tag, "send");
2005         ufshcd_clk_scaling_start_busy(hba);
2006         __set_bit(task_tag, &hba->outstanding_reqs);
2007         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2008         /* Make sure that doorbell is committed immediately */
2009         wmb();
2010 }
2011
2012 /**
2013  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2014  * @lrbp: pointer to local reference block
2015  */
2016 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2017 {
2018         int len;
2019         if (lrbp->sense_buffer &&
2020             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2021                 int len_to_copy;
2022
2023                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2024                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2025
2026                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2027                        len_to_copy);
2028         }
2029 }
2030
2031 /**
2032  * ufshcd_copy_query_response() - Copy the Query Response and the data
2033  * descriptor
2034  * @hba: per adapter instance
2035  * @lrbp: pointer to local reference block
2036  */
2037 static
2038 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2039 {
2040         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2041
2042         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2043
2044         /* Get the descriptor */
2045         if (hba->dev_cmd.query.descriptor &&
2046             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2047                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2048                                 GENERAL_UPIU_REQUEST_SIZE;
2049                 u16 resp_len;
2050                 u16 buf_len;
2051
2052                 /* data segment length */
2053                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2054                                                 MASK_QUERY_DATA_SEG_LEN;
2055                 buf_len = be16_to_cpu(
2056                                 hba->dev_cmd.query.request.upiu_req.length);
2057                 if (likely(buf_len >= resp_len)) {
2058                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2059                 } else {
2060                         dev_warn(hba->dev,
2061                                  "%s: rsp size %d is bigger than buffer size %d",
2062                                  __func__, resp_len, buf_len);
2063                         return -EINVAL;
2064                 }
2065         }
2066
2067         return 0;
2068 }
2069
2070 /**
2071  * ufshcd_hba_capabilities - Read controller capabilities
2072  * @hba: per adapter instance
2073  *
2074  * Return: 0 on success, negative on error.
2075  */
2076 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2077 {
2078         int err;
2079
2080         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2081
2082         /* nutrs and nutmrs are 0 based values */
2083         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2084         hba->nutmrs =
2085         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2086
2087         /* Read crypto capabilities */
2088         err = ufshcd_hba_init_crypto_capabilities(hba);
2089         if (err)
2090                 dev_err(hba->dev, "crypto setup failed\n");
2091
2092         return err;
2093 }
2094
2095 /**
2096  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2097  *                            to accept UIC commands
2098  * @hba: per adapter instance
2099  * Return true on success, else false
2100  */
2101 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2102 {
2103         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2104                 return true;
2105         else
2106                 return false;
2107 }
2108
2109 /**
2110  * ufshcd_get_upmcrs - Get the power mode change request status
2111  * @hba: Pointer to adapter instance
2112  *
2113  * This function gets the UPMCRS field of HCS register
2114  * Returns value of UPMCRS field
2115  */
2116 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2117 {
2118         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2119 }
2120
2121 /**
2122  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2123  * @hba: per adapter instance
2124  * @uic_cmd: UIC command
2125  *
2126  * Mutex must be held.
2127  */
2128 static inline void
2129 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2130 {
2131         WARN_ON(hba->active_uic_cmd);
2132
2133         hba->active_uic_cmd = uic_cmd;
2134
2135         /* Write Args */
2136         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2137         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2138         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2139
2140         ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2141
2142         /* Write UIC Cmd */
2143         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2144                       REG_UIC_COMMAND);
2145 }
2146
2147 /**
2148  * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
2149  * @hba: per adapter instance
2150  * @uic_cmd: UIC command
2151  *
2152  * Must be called with mutex held.
2153  * Returns 0 only if success.
2154  */
2155 static int
2156 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2157 {
2158         int ret;
2159         unsigned long flags;
2160
2161         if (wait_for_completion_timeout(&uic_cmd->done,
2162                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2163                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2164         } else {
2165                 ret = -ETIMEDOUT;
2166                 dev_err(hba->dev,
2167                         "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2168                         uic_cmd->command, uic_cmd->argument3);
2169
2170                 if (!uic_cmd->cmd_active) {
2171                         dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2172                                 __func__);
2173                         ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2174                 }
2175         }
2176
2177         spin_lock_irqsave(hba->host->host_lock, flags);
2178         hba->active_uic_cmd = NULL;
2179         spin_unlock_irqrestore(hba->host->host_lock, flags);
2180
2181         return ret;
2182 }
2183
2184 /**
2185  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2186  * @hba: per adapter instance
2187  * @uic_cmd: UIC command
2188  * @completion: initialize the completion only if this is set to true
2189  *
2190  * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
2191  * with mutex held and host_lock locked.
2192  * Returns 0 only if success.
2193  */
2194 static int
2195 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2196                       bool completion)
2197 {
2198         if (!ufshcd_ready_for_uic_cmd(hba)) {
2199                 dev_err(hba->dev,
2200                         "Controller not ready to accept UIC commands\n");
2201                 return -EIO;
2202         }
2203
2204         if (completion)
2205                 init_completion(&uic_cmd->done);
2206
2207         uic_cmd->cmd_active = 1;
2208         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2209
2210         return 0;
2211 }
2212
2213 /**
2214  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2215  * @hba: per adapter instance
2216  * @uic_cmd: UIC command
2217  *
2218  * Returns 0 only if success.
2219  */
2220 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2221 {
2222         int ret;
2223         unsigned long flags;
2224
2225         ufshcd_hold(hba, false);
2226         mutex_lock(&hba->uic_cmd_mutex);
2227         ufshcd_add_delay_before_dme_cmd(hba);
2228
2229         spin_lock_irqsave(hba->host->host_lock, flags);
2230         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2231         spin_unlock_irqrestore(hba->host->host_lock, flags);
2232         if (!ret)
2233                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2234
2235         mutex_unlock(&hba->uic_cmd_mutex);
2236
2237         ufshcd_release(hba);
2238         return ret;
2239 }
2240
2241 /**
2242  * ufshcd_map_sg - Map scatter-gather list to prdt
2243  * @hba: per adapter instance
2244  * @lrbp: pointer to local reference block
2245  *
2246  * Returns 0 in case of success, non-zero value in case of failure
2247  */
2248 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2249 {
2250         struct ufshcd_sg_entry *prd_table;
2251         struct scatterlist *sg;
2252         struct scsi_cmnd *cmd;
2253         int sg_segments;
2254         int i;
2255
2256         cmd = lrbp->cmd;
2257         sg_segments = scsi_dma_map(cmd);
2258         if (sg_segments < 0)
2259                 return sg_segments;
2260
2261         if (sg_segments) {
2262
2263                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2264                         lrbp->utr_descriptor_ptr->prd_table_length =
2265                                 cpu_to_le16((sg_segments *
2266                                         sizeof(struct ufshcd_sg_entry)));
2267                 else
2268                         lrbp->utr_descriptor_ptr->prd_table_length =
2269                                 cpu_to_le16((u16) (sg_segments));
2270
2271                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2272
2273                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2274                         prd_table[i].size  =
2275                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2276                         prd_table[i].base_addr =
2277                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2278                         prd_table[i].upper_addr =
2279                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2280                         prd_table[i].reserved = 0;
2281                 }
2282         } else {
2283                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2284         }
2285
2286         return 0;
2287 }
2288
2289 /**
2290  * ufshcd_enable_intr - enable interrupts
2291  * @hba: per adapter instance
2292  * @intrs: interrupt bits
2293  */
2294 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2295 {
2296         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2297
2298         if (hba->ufs_version == UFSHCI_VERSION_10) {
2299                 u32 rw;
2300                 rw = set & INTERRUPT_MASK_RW_VER_10;
2301                 set = rw | ((set ^ intrs) & intrs);
2302         } else {
2303                 set |= intrs;
2304         }
2305
2306         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2307 }
2308
2309 /**
2310  * ufshcd_disable_intr - disable interrupts
2311  * @hba: per adapter instance
2312  * @intrs: interrupt bits
2313  */
2314 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2315 {
2316         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2317
2318         if (hba->ufs_version == UFSHCI_VERSION_10) {
2319                 u32 rw;
2320                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2321                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2322                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2323
2324         } else {
2325                 set &= ~intrs;
2326         }
2327
2328         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2329 }
2330
2331 /**
2332  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2333  * descriptor according to request
2334  * @lrbp: pointer to local reference block
2335  * @upiu_flags: flags required in the header
2336  * @cmd_dir: requests data direction
2337  */
2338 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2339                         u8 *upiu_flags, enum dma_data_direction cmd_dir)
2340 {
2341         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2342         u32 data_direction;
2343         u32 dword_0;
2344         u32 dword_1 = 0;
2345         u32 dword_3 = 0;
2346
2347         if (cmd_dir == DMA_FROM_DEVICE) {
2348                 data_direction = UTP_DEVICE_TO_HOST;
2349                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2350         } else if (cmd_dir == DMA_TO_DEVICE) {
2351                 data_direction = UTP_HOST_TO_DEVICE;
2352                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2353         } else {
2354                 data_direction = UTP_NO_DATA_TRANSFER;
2355                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2356         }
2357
2358         dword_0 = data_direction | (lrbp->command_type
2359                                 << UPIU_COMMAND_TYPE_OFFSET);
2360         if (lrbp->intr_cmd)
2361                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2362
2363         /* Prepare crypto related dwords */
2364         ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2365
2366         /* Transfer request descriptor header fields */
2367         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2368         req_desc->header.dword_1 = cpu_to_le32(dword_1);
2369         /*
2370          * assigning invalid value for command status. Controller
2371          * updates OCS on command completion, with the command
2372          * status
2373          */
2374         req_desc->header.dword_2 =
2375                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2376         req_desc->header.dword_3 = cpu_to_le32(dword_3);
2377
2378         req_desc->prd_table_length = 0;
2379 }
2380
2381 /**
2382  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2383  * for scsi commands
2384  * @lrbp: local reference block pointer
2385  * @upiu_flags: flags
2386  */
2387 static
2388 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2389 {
2390         struct scsi_cmnd *cmd = lrbp->cmd;
2391         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2392         unsigned short cdb_len;
2393
2394         /* command descriptor fields */
2395         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2396                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2397                                 lrbp->lun, lrbp->task_tag);
2398         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2399                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2400
2401         /* Total EHS length and Data segment length will be zero */
2402         ucd_req_ptr->header.dword_2 = 0;
2403
2404         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2405
2406         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2407         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2408         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2409
2410         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2411 }
2412
2413 /**
2414  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2415  * for query requsts
2416  * @hba: UFS hba
2417  * @lrbp: local reference block pointer
2418  * @upiu_flags: flags
2419  */
2420 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2421                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2422 {
2423         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2424         struct ufs_query *query = &hba->dev_cmd.query;
2425         u16 len = be16_to_cpu(query->request.upiu_req.length);
2426
2427         /* Query request header */
2428         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2429                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2430                         lrbp->lun, lrbp->task_tag);
2431         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2432                         0, query->request.query_func, 0, 0);
2433
2434         /* Data segment length only need for WRITE_DESC */
2435         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2436                 ucd_req_ptr->header.dword_2 =
2437                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2438         else
2439                 ucd_req_ptr->header.dword_2 = 0;
2440
2441         /* Copy the Query Request buffer as is */
2442         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2443                         QUERY_OSF_SIZE);
2444
2445         /* Copy the Descriptor */
2446         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2447                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2448
2449         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2450 }
2451
2452 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2453 {
2454         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2455
2456         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2457
2458         /* command descriptor fields */
2459         ucd_req_ptr->header.dword_0 =
2460                 UPIU_HEADER_DWORD(
2461                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2462         /* clear rest of the fields of basic header */
2463         ucd_req_ptr->header.dword_1 = 0;
2464         ucd_req_ptr->header.dword_2 = 0;
2465
2466         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2467 }
2468
2469 /**
2470  * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
2471  *                           for Device Management Purposes
2472  * @hba: per adapter instance
2473  * @lrbp: pointer to local reference block
2474  */
2475 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2476                                       struct ufshcd_lrb *lrbp)
2477 {
2478         u8 upiu_flags;
2479         int ret = 0;
2480
2481         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2482             (hba->ufs_version == UFSHCI_VERSION_11))
2483                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2484         else
2485                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2486
2487         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2488         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2489                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2490         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2491                 ufshcd_prepare_utp_nop_upiu(lrbp);
2492         else
2493                 ret = -EINVAL;
2494
2495         return ret;
2496 }
2497
2498 /**
2499  * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2500  *                         for SCSI Purposes
2501  * @hba: per adapter instance
2502  * @lrbp: pointer to local reference block
2503  */
2504 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2505 {
2506         u8 upiu_flags;
2507         int ret = 0;
2508
2509         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2510             (hba->ufs_version == UFSHCI_VERSION_11))
2511                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2512         else
2513                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2514
2515         if (likely(lrbp->cmd)) {
2516                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2517                                                 lrbp->cmd->sc_data_direction);
2518                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2519         } else {
2520                 ret = -EINVAL;
2521         }
2522
2523         return ret;
2524 }
2525
2526 /**
2527  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2528  * @upiu_wlun_id: UPIU W-LUN id
2529  *
2530  * Returns SCSI W-LUN id
2531  */
2532 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2533 {
2534         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2535 }
2536
2537 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2538 {
2539         struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2540         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2541         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2542                 i * sizeof(struct utp_transfer_cmd_desc);
2543         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2544                                        response_upiu);
2545         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2546
2547         lrb->utr_descriptor_ptr = utrdlp + i;
2548         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2549                 i * sizeof(struct utp_transfer_req_desc);
2550         lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2551         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2552         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2553         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2554         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2555         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2556 }
2557
2558 /**
2559  * ufshcd_queuecommand - main entry point for SCSI requests
2560  * @host: SCSI host pointer
2561  * @cmd: command from SCSI Midlayer
2562  *
2563  * Returns 0 for success, non-zero in case of failure
2564  */
2565 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2566 {
2567         struct ufshcd_lrb *lrbp;
2568         struct ufs_hba *hba;
2569         unsigned long flags;
2570         int tag;
2571         int err = 0;
2572
2573         hba = shost_priv(host);
2574
2575         tag = cmd->request->tag;
2576         if (!ufshcd_valid_tag(hba, tag)) {
2577                 dev_err(hba->dev,
2578                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2579                         __func__, tag, cmd, cmd->request);
2580                 BUG();
2581         }
2582
2583         if (!down_read_trylock(&hba->clk_scaling_lock))
2584                 return SCSI_MLQUEUE_HOST_BUSY;
2585
2586         hba->req_abort_count = 0;
2587
2588         err = ufshcd_hold(hba, true);
2589         if (err) {
2590                 err = SCSI_MLQUEUE_HOST_BUSY;
2591                 goto out;
2592         }
2593         WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2594                 (hba->clk_gating.state != CLKS_ON));
2595
2596         lrbp = &hba->lrb[tag];
2597         if (unlikely(lrbp->in_use)) {
2598                 if (hba->pm_op_in_progress)
2599                         set_host_byte(cmd, DID_BAD_TARGET);
2600                 else
2601                         err = SCSI_MLQUEUE_HOST_BUSY;
2602                 ufshcd_release(hba);
2603                 goto out;
2604         }
2605
2606         WARN_ON(lrbp->cmd);
2607         lrbp->cmd = cmd;
2608         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2609         lrbp->sense_buffer = cmd->sense_buffer;
2610         lrbp->task_tag = tag;
2611         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2612         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2613
2614         ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2615
2616         lrbp->req_abort_skip = false;
2617
2618         ufshcd_comp_scsi_upiu(hba, lrbp);
2619
2620         err = ufshcd_map_sg(hba, lrbp);
2621         if (err) {
2622                 lrbp->cmd = NULL;
2623                 ufshcd_release(hba);
2624                 goto out;
2625         }
2626         /* Make sure descriptors are ready before ringing the doorbell */
2627         wmb();
2628
2629         spin_lock_irqsave(hba->host->host_lock, flags);
2630         switch (hba->ufshcd_state) {
2631         case UFSHCD_STATE_OPERATIONAL:
2632         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2633                 break;
2634         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2635                 /*
2636                  * pm_runtime_get_sync() is used at error handling preparation
2637                  * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2638                  * PM ops, it can never be finished if we let SCSI layer keep
2639                  * retrying it, which gets err handler stuck forever. Neither
2640                  * can we let the scsi cmd pass through, because UFS is in bad
2641                  * state, the scsi cmd may eventually time out, which will get
2642                  * err handler blocked for too long. So, just fail the scsi cmd
2643                  * sent from PM ops, err handler can recover PM error anyways.
2644                  */
2645                 if (hba->pm_op_in_progress) {
2646                         hba->force_reset = true;
2647                         set_host_byte(cmd, DID_BAD_TARGET);
2648                         goto out_compl_cmd;
2649                 }
2650                 fallthrough;
2651         case UFSHCD_STATE_RESET:
2652                 err = SCSI_MLQUEUE_HOST_BUSY;
2653                 goto out_compl_cmd;
2654         case UFSHCD_STATE_ERROR:
2655                 set_host_byte(cmd, DID_ERROR);
2656                 goto out_compl_cmd;
2657         default:
2658                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2659                                 __func__, hba->ufshcd_state);
2660                 set_host_byte(cmd, DID_BAD_TARGET);
2661                 goto out_compl_cmd;
2662         }
2663         ufshcd_send_command(hba, tag);
2664         spin_unlock_irqrestore(hba->host->host_lock, flags);
2665         goto out;
2666
2667 out_compl_cmd:
2668         scsi_dma_unmap(lrbp->cmd);
2669         lrbp->cmd = NULL;
2670         spin_unlock_irqrestore(hba->host->host_lock, flags);
2671         ufshcd_release(hba);
2672         if (!err)
2673                 cmd->scsi_done(cmd);
2674 out:
2675         up_read(&hba->clk_scaling_lock);
2676         return err;
2677 }
2678
2679 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2680                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2681 {
2682         lrbp->cmd = NULL;
2683         lrbp->sense_bufflen = 0;
2684         lrbp->sense_buffer = NULL;
2685         lrbp->task_tag = tag;
2686         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2687         lrbp->intr_cmd = true; /* No interrupt aggregation */
2688         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2689         hba->dev_cmd.type = cmd_type;
2690
2691         return ufshcd_compose_devman_upiu(hba, lrbp);
2692 }
2693
2694 static int
2695 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2696 {
2697         int err = 0;
2698         unsigned long flags;
2699         u32 mask = 1 << tag;
2700
2701         /* clear outstanding transaction before retry */
2702         spin_lock_irqsave(hba->host->host_lock, flags);
2703         ufshcd_utrl_clear(hba, tag);
2704         spin_unlock_irqrestore(hba->host->host_lock, flags);
2705
2706         /*
2707          * wait for for h/w to clear corresponding bit in door-bell.
2708          * max. wait is 1 sec.
2709          */
2710         err = ufshcd_wait_for_register(hba,
2711                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2712                         mask, ~mask, 1000, 1000);
2713
2714         return err;
2715 }
2716
2717 static int
2718 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2719 {
2720         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2721
2722         /* Get the UPIU response */
2723         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2724                                 UPIU_RSP_CODE_OFFSET;
2725         return query_res->response;
2726 }
2727
2728 /**
2729  * ufshcd_dev_cmd_completion() - handles device management command responses
2730  * @hba: per adapter instance
2731  * @lrbp: pointer to local reference block
2732  */
2733 static int
2734 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2735 {
2736         int resp;
2737         int err = 0;
2738
2739         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2740         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2741
2742         switch (resp) {
2743         case UPIU_TRANSACTION_NOP_IN:
2744                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2745                         err = -EINVAL;
2746                         dev_err(hba->dev, "%s: unexpected response %x\n",
2747                                         __func__, resp);
2748                 }
2749                 break;
2750         case UPIU_TRANSACTION_QUERY_RSP:
2751                 err = ufshcd_check_query_response(hba, lrbp);
2752                 if (!err)
2753                         err = ufshcd_copy_query_response(hba, lrbp);
2754                 break;
2755         case UPIU_TRANSACTION_REJECT_UPIU:
2756                 /* TODO: handle Reject UPIU Response */
2757                 err = -EPERM;
2758                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2759                                 __func__);
2760                 break;
2761         default:
2762                 err = -EINVAL;
2763                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2764                                 __func__, resp);
2765                 break;
2766         }
2767
2768         return err;
2769 }
2770
2771 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2772                 struct ufshcd_lrb *lrbp, int max_timeout)
2773 {
2774         int err = 0;
2775         unsigned long time_left;
2776         unsigned long flags;
2777
2778         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2779                         msecs_to_jiffies(max_timeout));
2780
2781         /* Make sure descriptors are ready before ringing the doorbell */
2782         wmb();
2783         spin_lock_irqsave(hba->host->host_lock, flags);
2784         hba->dev_cmd.complete = NULL;
2785         if (likely(time_left)) {
2786                 err = ufshcd_get_tr_ocs(lrbp);
2787                 if (!err)
2788                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2789         }
2790         spin_unlock_irqrestore(hba->host->host_lock, flags);
2791
2792         if (!time_left) {
2793                 err = -ETIMEDOUT;
2794                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2795                         __func__, lrbp->task_tag);
2796                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2797                         /* successfully cleared the command, retry if needed */
2798                         err = -EAGAIN;
2799                 /*
2800                  * in case of an error, after clearing the doorbell,
2801                  * we also need to clear the outstanding_request
2802                  * field in hba
2803                  */
2804                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2805         }
2806
2807         return err;
2808 }
2809
2810 /**
2811  * ufshcd_exec_dev_cmd - API for sending device management requests
2812  * @hba: UFS hba
2813  * @cmd_type: specifies the type (NOP, Query...)
2814  * @timeout: time in seconds
2815  *
2816  * NOTE: Since there is only one available tag for device management commands,
2817  * it is expected you hold the hba->dev_cmd.lock mutex.
2818  */
2819 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2820                 enum dev_cmd_type cmd_type, int timeout)
2821 {
2822         struct request_queue *q = hba->cmd_queue;
2823         struct request *req;
2824         struct ufshcd_lrb *lrbp;
2825         int err;
2826         int tag;
2827         struct completion wait;
2828         unsigned long flags;
2829
2830         down_read(&hba->clk_scaling_lock);
2831
2832         /*
2833          * Get free slot, sleep if slots are unavailable.
2834          * Even though we use wait_event() which sleeps indefinitely,
2835          * the maximum wait time is bounded by SCSI request timeout.
2836          */
2837         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2838         if (IS_ERR(req)) {
2839                 err = PTR_ERR(req);
2840                 goto out_unlock;
2841         }
2842         tag = req->tag;
2843         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2844
2845         init_completion(&wait);
2846         lrbp = &hba->lrb[tag];
2847         if (unlikely(lrbp->in_use)) {
2848                 err = -EBUSY;
2849                 goto out;
2850         }
2851
2852         WARN_ON(lrbp->cmd);
2853         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2854         if (unlikely(err))
2855                 goto out_put_tag;
2856
2857         hba->dev_cmd.complete = &wait;
2858
2859         ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2860         /* Make sure descriptors are ready before ringing the doorbell */
2861         wmb();
2862         spin_lock_irqsave(hba->host->host_lock, flags);
2863         ufshcd_send_command(hba, tag);
2864         spin_unlock_irqrestore(hba->host->host_lock, flags);
2865
2866         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2867
2868 out:
2869         ufshcd_add_query_upiu_trace(hba, tag,
2870                         err ? "query_complete_err" : "query_complete");
2871
2872 out_put_tag:
2873         blk_put_request(req);
2874 out_unlock:
2875         up_read(&hba->clk_scaling_lock);
2876         return err;
2877 }
2878
2879 /**
2880  * ufshcd_init_query() - init the query response and request parameters
2881  * @hba: per-adapter instance
2882  * @request: address of the request pointer to be initialized
2883  * @response: address of the response pointer to be initialized
2884  * @opcode: operation to perform
2885  * @idn: flag idn to access
2886  * @index: LU number to access
2887  * @selector: query/flag/descriptor further identification
2888  */
2889 static inline void ufshcd_init_query(struct ufs_hba *hba,
2890                 struct ufs_query_req **request, struct ufs_query_res **response,
2891                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2892 {
2893         *request = &hba->dev_cmd.query.request;
2894         *response = &hba->dev_cmd.query.response;
2895         memset(*request, 0, sizeof(struct ufs_query_req));
2896         memset(*response, 0, sizeof(struct ufs_query_res));
2897         (*request)->upiu_req.opcode = opcode;
2898         (*request)->upiu_req.idn = idn;
2899         (*request)->upiu_req.index = index;
2900         (*request)->upiu_req.selector = selector;
2901 }
2902
2903 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2904         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
2905 {
2906         int ret;
2907         int retries;
2908
2909         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2910                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2911                 if (ret)
2912                         dev_dbg(hba->dev,
2913                                 "%s: failed with error %d, retries %d\n",
2914                                 __func__, ret, retries);
2915                 else
2916                         break;
2917         }
2918
2919         if (ret)
2920                 dev_err(hba->dev,
2921                         "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2922                         __func__, opcode, idn, ret, retries);
2923         return ret;
2924 }
2925
2926 /**
2927  * ufshcd_query_flag() - API function for sending flag query requests
2928  * @hba: per-adapter instance
2929  * @opcode: flag query to perform
2930  * @idn: flag idn to access
2931  * @index: flag index to access
2932  * @flag_res: the flag value after the query request completes
2933  *
2934  * Returns 0 for success, non-zero in case of failure
2935  */
2936 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2937                         enum flag_idn idn, u8 index, bool *flag_res)
2938 {
2939         struct ufs_query_req *request = NULL;
2940         struct ufs_query_res *response = NULL;
2941         int err, selector = 0;
2942         int timeout = QUERY_REQ_TIMEOUT;
2943
2944         BUG_ON(!hba);
2945
2946         ufshcd_hold(hba, false);
2947         mutex_lock(&hba->dev_cmd.lock);
2948         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2949                         selector);
2950
2951         switch (opcode) {
2952         case UPIU_QUERY_OPCODE_SET_FLAG:
2953         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2954         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2955                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2956                 break;
2957         case UPIU_QUERY_OPCODE_READ_FLAG:
2958                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2959                 if (!flag_res) {
2960                         /* No dummy reads */
2961                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2962                                         __func__);
2963                         err = -EINVAL;
2964                         goto out_unlock;
2965                 }
2966                 break;
2967         default:
2968                 dev_err(hba->dev,
2969                         "%s: Expected query flag opcode but got = %d\n",
2970                         __func__, opcode);
2971                 err = -EINVAL;
2972                 goto out_unlock;
2973         }
2974
2975         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2976
2977         if (err) {
2978                 dev_err(hba->dev,
2979                         "%s: Sending flag query for idn %d failed, err = %d\n",
2980                         __func__, idn, err);
2981                 goto out_unlock;
2982         }
2983
2984         if (flag_res)
2985                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2986                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2987
2988 out_unlock:
2989         mutex_unlock(&hba->dev_cmd.lock);
2990         ufshcd_release(hba);
2991         return err;
2992 }
2993
2994 /**
2995  * ufshcd_query_attr - API function for sending attribute requests
2996  * @hba: per-adapter instance
2997  * @opcode: attribute opcode
2998  * @idn: attribute idn to access
2999  * @index: index field
3000  * @selector: selector field
3001  * @attr_val: the attribute value after the query request completes
3002  *
3003  * Returns 0 for success, non-zero in case of failure
3004 */
3005 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3006                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3007 {
3008         struct ufs_query_req *request = NULL;
3009         struct ufs_query_res *response = NULL;
3010         int err;
3011
3012         BUG_ON(!hba);
3013
3014         if (!attr_val) {
3015                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3016                                 __func__, opcode);
3017                 return -EINVAL;
3018         }
3019
3020         ufshcd_hold(hba, false);
3021
3022         mutex_lock(&hba->dev_cmd.lock);
3023         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3024                         selector);
3025
3026         switch (opcode) {
3027         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3028                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3029                 request->upiu_req.value = cpu_to_be32(*attr_val);
3030                 break;
3031         case UPIU_QUERY_OPCODE_READ_ATTR:
3032                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3033                 break;
3034         default:
3035                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3036                                 __func__, opcode);
3037                 err = -EINVAL;
3038                 goto out_unlock;
3039         }
3040
3041         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3042
3043         if (err) {
3044                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3045                                 __func__, opcode, idn, index, err);
3046                 goto out_unlock;
3047         }
3048
3049         *attr_val = be32_to_cpu(response->upiu_res.value);
3050
3051 out_unlock:
3052         mutex_unlock(&hba->dev_cmd.lock);
3053         ufshcd_release(hba);
3054         return err;
3055 }
3056
3057 /**
3058  * ufshcd_query_attr_retry() - API function for sending query
3059  * attribute with retries
3060  * @hba: per-adapter instance
3061  * @opcode: attribute opcode
3062  * @idn: attribute idn to access
3063  * @index: index field
3064  * @selector: selector field
3065  * @attr_val: the attribute value after the query request
3066  * completes
3067  *
3068  * Returns 0 for success, non-zero in case of failure
3069 */
3070 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3071         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3072         u32 *attr_val)
3073 {
3074         int ret = 0;
3075         u32 retries;
3076
3077         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3078                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3079                                                 selector, attr_val);
3080                 if (ret)
3081                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3082                                 __func__, ret, retries);
3083                 else
3084                         break;
3085         }
3086
3087         if (ret)
3088                 dev_err(hba->dev,
3089                         "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3090                         __func__, idn, ret, QUERY_REQ_RETRIES);
3091         return ret;
3092 }
3093
3094 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3095                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3096                         u8 selector, u8 *desc_buf, int *buf_len)
3097 {
3098         struct ufs_query_req *request = NULL;
3099         struct ufs_query_res *response = NULL;
3100         int err;
3101
3102         BUG_ON(!hba);
3103
3104         if (!desc_buf) {
3105                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3106                                 __func__, opcode);
3107                 return -EINVAL;
3108         }
3109
3110         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3111                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3112                                 __func__, *buf_len);
3113                 return -EINVAL;
3114         }
3115
3116         ufshcd_hold(hba, false);
3117
3118         mutex_lock(&hba->dev_cmd.lock);
3119         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3120                         selector);
3121         hba->dev_cmd.query.descriptor = desc_buf;
3122         request->upiu_req.length = cpu_to_be16(*buf_len);
3123
3124         switch (opcode) {
3125         case UPIU_QUERY_OPCODE_WRITE_DESC:
3126                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3127                 break;
3128         case UPIU_QUERY_OPCODE_READ_DESC:
3129                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3130                 break;
3131         default:
3132                 dev_err(hba->dev,
3133                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3134                                 __func__, opcode);
3135                 err = -EINVAL;
3136                 goto out_unlock;
3137         }
3138
3139         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3140
3141         if (err) {
3142                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3143                                 __func__, opcode, idn, index, err);
3144                 goto out_unlock;
3145         }
3146
3147         *buf_len = be16_to_cpu(response->upiu_res.length);
3148
3149 out_unlock:
3150         hba->dev_cmd.query.descriptor = NULL;
3151         mutex_unlock(&hba->dev_cmd.lock);
3152         ufshcd_release(hba);
3153         return err;
3154 }
3155
3156 /**
3157  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3158  * @hba: per-adapter instance
3159  * @opcode: attribute opcode
3160  * @idn: attribute idn to access
3161  * @index: index field
3162  * @selector: selector field
3163  * @desc_buf: the buffer that contains the descriptor
3164  * @buf_len: length parameter passed to the device
3165  *
3166  * Returns 0 for success, non-zero in case of failure.
3167  * The buf_len parameter will contain, on return, the length parameter
3168  * received on the response.
3169  */
3170 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3171                                   enum query_opcode opcode,
3172                                   enum desc_idn idn, u8 index,
3173                                   u8 selector,
3174                                   u8 *desc_buf, int *buf_len)
3175 {
3176         int err;
3177         int retries;
3178
3179         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3180                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3181                                                 selector, desc_buf, buf_len);
3182                 if (!err || err == -EINVAL)
3183                         break;
3184         }
3185
3186         return err;
3187 }
3188
3189 /**
3190  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3191  * @hba: Pointer to adapter instance
3192  * @desc_id: descriptor idn value
3193  * @desc_len: mapped desc length (out)
3194  */
3195 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3196                                   int *desc_len)
3197 {
3198         if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3199             desc_id == QUERY_DESC_IDN_RFU_1)
3200                 *desc_len = 0;
3201         else
3202                 *desc_len = hba->desc_size[desc_id];
3203 }
3204 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3205
3206 static void ufshcd_update_desc_length(struct ufs_hba *hba,
3207                                       enum desc_idn desc_id, int desc_index,
3208                                       unsigned char desc_len)
3209 {
3210         if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3211             desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3212                 /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3213                  * than the RPMB unit, however, both descriptors share the same
3214                  * desc_idn, to cover both unit descriptors with one length, we
3215                  * choose the normal unit descriptor length by desc_index.
3216                  */
3217                 hba->desc_size[desc_id] = desc_len;
3218 }
3219
3220 /**
3221  * ufshcd_read_desc_param - read the specified descriptor parameter
3222  * @hba: Pointer to adapter instance
3223  * @desc_id: descriptor idn value
3224  * @desc_index: descriptor index
3225  * @param_offset: offset of the parameter to read
3226  * @param_read_buf: pointer to buffer where parameter would be read
3227  * @param_size: sizeof(param_read_buf)
3228  *
3229  * Return 0 in case of success, non-zero otherwise
3230  */
3231 int ufshcd_read_desc_param(struct ufs_hba *hba,
3232                            enum desc_idn desc_id,
3233                            int desc_index,
3234                            u8 param_offset,
3235                            u8 *param_read_buf,
3236                            u8 param_size)
3237 {
3238         int ret;
3239         u8 *desc_buf;
3240         int buff_len;
3241         bool is_kmalloc = true;
3242
3243         /* Safety check */
3244         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3245                 return -EINVAL;
3246
3247         /* Get the length of descriptor */
3248         ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3249         if (!buff_len) {
3250                 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3251                 return -EINVAL;
3252         }
3253
3254         if (param_offset >= buff_len) {
3255                 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3256                         __func__, param_offset, desc_id, buff_len);
3257                 return -EINVAL;
3258         }
3259
3260         /* Check whether we need temp memory */
3261         if (param_offset != 0 || param_size < buff_len) {
3262                 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3263                 if (!desc_buf)
3264                         return -ENOMEM;
3265         } else {
3266                 desc_buf = param_read_buf;
3267                 is_kmalloc = false;
3268         }
3269
3270         /* Request for full descriptor */
3271         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3272                                         desc_id, desc_index, 0,
3273                                         desc_buf, &buff_len);
3274
3275         if (ret) {
3276                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3277                         __func__, desc_id, desc_index, param_offset, ret);
3278                 goto out;
3279         }
3280
3281         /* Sanity check */
3282         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3283                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3284                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3285                 ret = -EINVAL;
3286                 goto out;
3287         }
3288
3289         /* Update descriptor length */
3290         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3291         ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3292
3293         if (is_kmalloc) {
3294                 /* Make sure we don't copy more data than available */
3295                 if (param_offset + param_size > buff_len)
3296                         param_size = buff_len - param_offset;
3297                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3298         }
3299 out:
3300         if (is_kmalloc)
3301                 kfree(desc_buf);
3302         return ret;
3303 }
3304
3305 /**
3306  * struct uc_string_id - unicode string
3307  *
3308  * @len: size of this descriptor inclusive
3309  * @type: descriptor type
3310  * @uc: unicode string character
3311  */
3312 struct uc_string_id {
3313         u8 len;
3314         u8 type;
3315         wchar_t uc[];
3316 } __packed;
3317
3318 /* replace non-printable or non-ASCII characters with spaces */
3319 static inline char ufshcd_remove_non_printable(u8 ch)
3320 {
3321         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3322 }
3323
3324 /**
3325  * ufshcd_read_string_desc - read string descriptor
3326  * @hba: pointer to adapter instance
3327  * @desc_index: descriptor index
3328  * @buf: pointer to buffer where descriptor would be read,
3329  *       the caller should free the memory.
3330  * @ascii: if true convert from unicode to ascii characters
3331  *         null terminated string.
3332  *
3333  * Return:
3334  * *      string size on success.
3335  * *      -ENOMEM: on allocation failure
3336  * *      -EINVAL: on a wrong parameter
3337  */
3338 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3339                             u8 **buf, bool ascii)
3340 {
3341         struct uc_string_id *uc_str;
3342         u8 *str;
3343         int ret;
3344
3345         if (!buf)
3346                 return -EINVAL;
3347
3348         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3349         if (!uc_str)
3350                 return -ENOMEM;
3351
3352         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3353                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3354         if (ret < 0) {
3355                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3356                         QUERY_REQ_RETRIES, ret);
3357                 str = NULL;
3358                 goto out;
3359         }
3360
3361         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3362                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3363                 str = NULL;
3364                 ret = 0;
3365                 goto out;
3366         }
3367
3368         if (ascii) {
3369                 ssize_t ascii_len;
3370                 int i;
3371                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3372                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3373                 str = kzalloc(ascii_len, GFP_KERNEL);
3374                 if (!str) {
3375                         ret = -ENOMEM;
3376                         goto out;
3377                 }
3378
3379                 /*
3380                  * the descriptor contains string in UTF16 format
3381                  * we need to convert to utf-8 so it can be displayed
3382                  */
3383                 ret = utf16s_to_utf8s(uc_str->uc,
3384                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3385                                       UTF16_BIG_ENDIAN, str, ascii_len);
3386
3387                 /* replace non-printable or non-ASCII characters with spaces */
3388                 for (i = 0; i < ret; i++)
3389                         str[i] = ufshcd_remove_non_printable(str[i]);
3390
3391                 str[ret++] = '\0';
3392
3393         } else {
3394                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3395                 if (!str) {
3396                         ret = -ENOMEM;
3397                         goto out;
3398                 }
3399                 ret = uc_str->len;
3400         }
3401 out:
3402         *buf = str;
3403         kfree(uc_str);
3404         return ret;
3405 }
3406
3407 /**
3408  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3409  * @hba: Pointer to adapter instance
3410  * @lun: lun id
3411  * @param_offset: offset of the parameter to read
3412  * @param_read_buf: pointer to buffer where parameter would be read
3413  * @param_size: sizeof(param_read_buf)
3414  *
3415  * Return 0 in case of success, non-zero otherwise
3416  */
3417 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3418                                               int lun,
3419                                               enum unit_desc_param param_offset,
3420                                               u8 *param_read_buf,
3421                                               u32 param_size)
3422 {
3423         /*
3424          * Unit descriptors are only available for general purpose LUs (LUN id
3425          * from 0 to 7) and RPMB Well known LU.
3426          */
3427         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3428                 return -EOPNOTSUPP;
3429
3430         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3431                                       param_offset, param_read_buf, param_size);
3432 }
3433
3434 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3435 {
3436         int err = 0;
3437         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3438
3439         if (hba->dev_info.wspecversion >= 0x300) {
3440                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3441                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3442                                 &gating_wait);
3443                 if (err)
3444                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3445                                          err, gating_wait);
3446
3447                 if (gating_wait == 0) {
3448                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3449                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3450                                          gating_wait);
3451                 }
3452
3453                 hba->dev_info.clk_gating_wait_us = gating_wait;
3454         }
3455
3456         return err;
3457 }
3458
3459 /**
3460  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3461  * @hba: per adapter instance
3462  *
3463  * 1. Allocate DMA memory for Command Descriptor array
3464  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3465  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3466  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3467  *      (UTMRDL)
3468  * 4. Allocate memory for local reference block(lrb).
3469  *
3470  * Returns 0 for success, non-zero in case of failure
3471  */
3472 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3473 {
3474         size_t utmrdl_size, utrdl_size, ucdl_size;
3475
3476         /* Allocate memory for UTP command descriptors */
3477         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3478         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3479                                                   ucdl_size,
3480                                                   &hba->ucdl_dma_addr,
3481                                                   GFP_KERNEL);
3482
3483         /*
3484          * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3485          * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3486          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3487          * be aligned to 128 bytes as well
3488          */
3489         if (!hba->ucdl_base_addr ||
3490             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3491                 dev_err(hba->dev,
3492                         "Command Descriptor Memory allocation failed\n");
3493                 goto out;
3494         }
3495
3496         /*
3497          * Allocate memory for UTP Transfer descriptors
3498          * UFSHCI requires 1024 byte alignment of UTRD
3499          */
3500         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3501         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3502                                                    utrdl_size,
3503                                                    &hba->utrdl_dma_addr,
3504                                                    GFP_KERNEL);
3505         if (!hba->utrdl_base_addr ||
3506             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3507                 dev_err(hba->dev,
3508                         "Transfer Descriptor Memory allocation failed\n");
3509                 goto out;
3510         }
3511
3512         /*
3513          * Allocate memory for UTP Task Management descriptors
3514          * UFSHCI requires 1024 byte alignment of UTMRD
3515          */
3516         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3517         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3518                                                     utmrdl_size,
3519                                                     &hba->utmrdl_dma_addr,
3520                                                     GFP_KERNEL);
3521         if (!hba->utmrdl_base_addr ||
3522             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3523                 dev_err(hba->dev,
3524                 "Task Management Descriptor Memory allocation failed\n");
3525                 goto out;
3526         }
3527
3528         /* Allocate memory for local reference block */
3529         hba->lrb = devm_kcalloc(hba->dev,
3530                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3531                                 GFP_KERNEL);
3532         if (!hba->lrb) {
3533                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3534                 goto out;
3535         }
3536         return 0;
3537 out:
3538         return -ENOMEM;
3539 }
3540
3541 /**
3542  * ufshcd_host_memory_configure - configure local reference block with
3543  *                              memory offsets
3544  * @hba: per adapter instance
3545  *
3546  * Configure Host memory space
3547  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3548  * address.
3549  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3550  * and PRDT offset.
3551  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3552  * into local reference block.
3553  */
3554 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3555 {
3556         struct utp_transfer_req_desc *utrdlp;
3557         dma_addr_t cmd_desc_dma_addr;
3558         dma_addr_t cmd_desc_element_addr;
3559         u16 response_offset;
3560         u16 prdt_offset;
3561         int cmd_desc_size;
3562         int i;
3563
3564         utrdlp = hba->utrdl_base_addr;
3565
3566         response_offset =
3567                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3568         prdt_offset =
3569                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3570
3571         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3572         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3573
3574         for (i = 0; i < hba->nutrs; i++) {
3575                 /* Configure UTRD with command descriptor base address */
3576                 cmd_desc_element_addr =
3577                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3578                 utrdlp[i].command_desc_base_addr_lo =
3579                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3580                 utrdlp[i].command_desc_base_addr_hi =
3581                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3582
3583                 /* Response upiu and prdt offset should be in double words */
3584                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3585                         utrdlp[i].response_upiu_offset =
3586                                 cpu_to_le16(response_offset);
3587                         utrdlp[i].prd_table_offset =
3588                                 cpu_to_le16(prdt_offset);
3589                         utrdlp[i].response_upiu_length =
3590                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3591                 } else {
3592                         utrdlp[i].response_upiu_offset =
3593                                 cpu_to_le16(response_offset >> 2);
3594                         utrdlp[i].prd_table_offset =
3595                                 cpu_to_le16(prdt_offset >> 2);
3596                         utrdlp[i].response_upiu_length =
3597                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3598                 }
3599
3600                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3601         }
3602 }
3603
3604 /**
3605  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3606  * @hba: per adapter instance
3607  *
3608  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3609  * in order to initialize the Unipro link startup procedure.
3610  * Once the Unipro links are up, the device connected to the controller
3611  * is detected.
3612  *
3613  * Returns 0 on success, non-zero value on failure
3614  */
3615 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3616 {
3617         struct uic_command uic_cmd = {0};
3618         int ret;
3619
3620         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3621
3622         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3623         if (ret)
3624                 dev_dbg(hba->dev,
3625                         "dme-link-startup: error code %d\n", ret);
3626         return ret;
3627 }
3628 /**
3629  * ufshcd_dme_reset - UIC command for DME_RESET
3630  * @hba: per adapter instance
3631  *
3632  * DME_RESET command is issued in order to reset UniPro stack.
3633  * This function now deals with cold reset.
3634  *
3635  * Returns 0 on success, non-zero value on failure
3636  */
3637 static int ufshcd_dme_reset(struct ufs_hba *hba)
3638 {
3639         struct uic_command uic_cmd = {0};
3640         int ret;
3641
3642         uic_cmd.command = UIC_CMD_DME_RESET;
3643
3644         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3645         if (ret)
3646                 dev_err(hba->dev,
3647                         "dme-reset: error code %d\n", ret);
3648
3649         return ret;
3650 }
3651
3652 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3653                                int agreed_gear,
3654                                int adapt_val)
3655 {
3656         int ret;
3657
3658         if (agreed_gear != UFS_HS_G4)
3659                 adapt_val = PA_NO_ADAPT;
3660
3661         ret = ufshcd_dme_set(hba,
3662                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3663                              adapt_val);
3664         return ret;
3665 }
3666 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3667
3668 /**
3669  * ufshcd_dme_enable - UIC command for DME_ENABLE
3670  * @hba: per adapter instance
3671  *
3672  * DME_ENABLE command is issued in order to enable UniPro stack.
3673  *
3674  * Returns 0 on success, non-zero value on failure
3675  */
3676 static int ufshcd_dme_enable(struct ufs_hba *hba)
3677 {
3678         struct uic_command uic_cmd = {0};
3679         int ret;
3680
3681         uic_cmd.command = UIC_CMD_DME_ENABLE;
3682
3683         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3684         if (ret)
3685                 dev_err(hba->dev,
3686                         "dme-enable: error code %d\n", ret);
3687
3688         return ret;
3689 }
3690
3691 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3692 {
3693         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3694         unsigned long min_sleep_time_us;
3695
3696         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3697                 return;
3698
3699         /*
3700          * last_dme_cmd_tstamp will be 0 only for 1st call to
3701          * this function
3702          */
3703         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3704                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3705         } else {
3706                 unsigned long delta =
3707                         (unsigned long) ktime_to_us(
3708                                 ktime_sub(ktime_get(),
3709                                 hba->last_dme_cmd_tstamp));
3710
3711                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3712                         min_sleep_time_us =
3713                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3714                 else
3715                         return; /* no more delay required */
3716         }
3717
3718         /* allow sleep for extra 50us if needed */
3719         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3720 }
3721
3722 /**
3723  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3724  * @hba: per adapter instance
3725  * @attr_sel: uic command argument1
3726  * @attr_set: attribute set type as uic command argument2
3727  * @mib_val: setting value as uic command argument3
3728  * @peer: indicate whether peer or local
3729  *
3730  * Returns 0 on success, non-zero value on failure
3731  */
3732 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3733                         u8 attr_set, u32 mib_val, u8 peer)
3734 {
3735         struct uic_command uic_cmd = {0};
3736         static const char *const action[] = {
3737                 "dme-set",
3738                 "dme-peer-set"
3739         };
3740         const char *set = action[!!peer];
3741         int ret;
3742         int retries = UFS_UIC_COMMAND_RETRIES;
3743
3744         uic_cmd.command = peer ?
3745                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3746         uic_cmd.argument1 = attr_sel;
3747         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3748         uic_cmd.argument3 = mib_val;
3749
3750         do {
3751                 /* for peer attributes we retry upon failure */
3752                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3753                 if (ret)
3754                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3755                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3756         } while (ret && peer && --retries);
3757
3758         if (ret)
3759                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3760                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3761                         UFS_UIC_COMMAND_RETRIES - retries);
3762
3763         return ret;
3764 }
3765 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3766
3767 /**
3768  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3769  * @hba: per adapter instance
3770  * @attr_sel: uic command argument1
3771  * @mib_val: the value of the attribute as returned by the UIC command
3772  * @peer: indicate whether peer or local
3773  *
3774  * Returns 0 on success, non-zero value on failure
3775  */
3776 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3777                         u32 *mib_val, u8 peer)
3778 {
3779         struct uic_command uic_cmd = {0};
3780         static const char *const action[] = {
3781                 "dme-get",
3782                 "dme-peer-get"
3783         };
3784         const char *get = action[!!peer];
3785         int ret;
3786         int retries = UFS_UIC_COMMAND_RETRIES;
3787         struct ufs_pa_layer_attr orig_pwr_info;
3788         struct ufs_pa_layer_attr temp_pwr_info;
3789         bool pwr_mode_change = false;
3790
3791         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3792                 orig_pwr_info = hba->pwr_info;
3793                 temp_pwr_info = orig_pwr_info;
3794
3795                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3796                     orig_pwr_info.pwr_rx == FAST_MODE) {
3797                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3798                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3799                         pwr_mode_change = true;
3800                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3801                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3802                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3803                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3804                         pwr_mode_change = true;
3805                 }
3806                 if (pwr_mode_change) {
3807                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3808                         if (ret)
3809                                 goto out;
3810                 }
3811         }
3812
3813         uic_cmd.command = peer ?
3814                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3815         uic_cmd.argument1 = attr_sel;
3816
3817         do {
3818                 /* for peer attributes we retry upon failure */
3819                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3820                 if (ret)
3821                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3822                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3823         } while (ret && peer && --retries);
3824
3825         if (ret)
3826                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3827                         get, UIC_GET_ATTR_ID(attr_sel),
3828                         UFS_UIC_COMMAND_RETRIES - retries);
3829
3830         if (mib_val && !ret)
3831                 *mib_val = uic_cmd.argument3;
3832
3833         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3834             && pwr_mode_change)
3835                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3836 out:
3837         return ret;
3838 }
3839 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3840
3841 /**
3842  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3843  * state) and waits for it to take effect.
3844  *
3845  * @hba: per adapter instance
3846  * @cmd: UIC command to execute
3847  *
3848  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3849  * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3850  * and device UniPro link and hence it's final completion would be indicated by
3851  * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3852  * addition to normal UIC command completion Status (UCCS). This function only
3853  * returns after the relevant status bits indicate the completion.
3854  *
3855  * Returns 0 on success, non-zero value on failure
3856  */
3857 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3858 {
3859         struct completion uic_async_done;
3860         unsigned long flags;
3861         u8 status;
3862         int ret;
3863         bool reenable_intr = false;
3864
3865         mutex_lock(&hba->uic_cmd_mutex);
3866         init_completion(&uic_async_done);
3867         ufshcd_add_delay_before_dme_cmd(hba);
3868
3869         spin_lock_irqsave(hba->host->host_lock, flags);
3870         if (ufshcd_is_link_broken(hba)) {
3871                 ret = -ENOLINK;
3872                 goto out_unlock;
3873         }
3874         hba->uic_async_done = &uic_async_done;
3875         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3876                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3877                 /*
3878                  * Make sure UIC command completion interrupt is disabled before
3879                  * issuing UIC command.
3880                  */
3881                 wmb();
3882                 reenable_intr = true;
3883         }
3884         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3885         spin_unlock_irqrestore(hba->host->host_lock, flags);
3886         if (ret) {
3887                 dev_err(hba->dev,
3888                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3889                         cmd->command, cmd->argument3, ret);
3890                 goto out;
3891         }
3892
3893         if (!wait_for_completion_timeout(hba->uic_async_done,
3894                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3895                 dev_err(hba->dev,
3896                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3897                         cmd->command, cmd->argument3);
3898
3899                 if (!cmd->cmd_active) {
3900                         dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
3901                                 __func__);
3902                         goto check_upmcrs;
3903                 }
3904
3905                 ret = -ETIMEDOUT;
3906                 goto out;
3907         }
3908
3909 check_upmcrs:
3910         status = ufshcd_get_upmcrs(hba);
3911         if (status != PWR_LOCAL) {
3912                 dev_err(hba->dev,
3913                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3914                         cmd->command, status);
3915                 ret = (status != PWR_OK) ? status : -1;
3916         }
3917 out:
3918         if (ret) {
3919                 ufshcd_print_host_state(hba);
3920                 ufshcd_print_pwr_info(hba);
3921                 ufshcd_print_evt_hist(hba);
3922         }
3923
3924         spin_lock_irqsave(hba->host->host_lock, flags);
3925         hba->active_uic_cmd = NULL;
3926         hba->uic_async_done = NULL;
3927         if (reenable_intr)
3928                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3929         if (ret) {
3930                 ufshcd_set_link_broken(hba);
3931                 ufshcd_schedule_eh_work(hba);
3932         }
3933 out_unlock:
3934         spin_unlock_irqrestore(hba->host->host_lock, flags);
3935         mutex_unlock(&hba->uic_cmd_mutex);
3936
3937         return ret;
3938 }
3939
3940 /**
3941  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
3942  *                              using DME_SET primitives.
3943  * @hba: per adapter instance
3944  * @mode: powr mode value
3945  *
3946  * Returns 0 on success, non-zero value on failure
3947  */
3948 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3949 {
3950         struct uic_command uic_cmd = {0};
3951         int ret;
3952
3953         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3954                 ret = ufshcd_dme_set(hba,
3955                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3956                 if (ret) {
3957                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3958                                                 __func__, ret);
3959                         goto out;
3960                 }
3961         }
3962
3963         uic_cmd.command = UIC_CMD_DME_SET;
3964         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3965         uic_cmd.argument3 = mode;
3966         ufshcd_hold(hba, false);
3967         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3968         ufshcd_release(hba);
3969
3970 out:
3971         return ret;
3972 }
3973
3974 int ufshcd_link_recovery(struct ufs_hba *hba)
3975 {
3976         int ret;
3977         unsigned long flags;
3978
3979         spin_lock_irqsave(hba->host->host_lock, flags);
3980         hba->ufshcd_state = UFSHCD_STATE_RESET;
3981         ufshcd_set_eh_in_progress(hba);
3982         spin_unlock_irqrestore(hba->host->host_lock, flags);
3983
3984         /* Reset the attached device */
3985         ufshcd_device_reset(hba);
3986
3987         ret = ufshcd_host_reset_and_restore(hba);
3988
3989         spin_lock_irqsave(hba->host->host_lock, flags);
3990         if (ret)
3991                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3992         ufshcd_clear_eh_in_progress(hba);
3993         spin_unlock_irqrestore(hba->host->host_lock, flags);
3994
3995         if (ret)
3996                 dev_err(hba->dev, "%s: link recovery failed, err %d",
3997                         __func__, ret);
3998
3999         return ret;
4000 }
4001 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4002
4003 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4004 {
4005         int ret;
4006         struct uic_command uic_cmd = {0};
4007         ktime_t start = ktime_get();
4008
4009         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4010
4011         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4012         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4013         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4014                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4015
4016         if (ret)
4017                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4018                         __func__, ret);
4019         else
4020                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4021                                                                 POST_CHANGE);
4022
4023         return ret;
4024 }
4025
4026 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4027 {
4028         struct uic_command uic_cmd = {0};
4029         int ret;
4030         ktime_t start = ktime_get();
4031
4032         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4033
4034         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4035         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4036         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4037                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4038
4039         if (ret) {
4040                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4041                         __func__, ret);
4042         } else {
4043                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4044                                                                 POST_CHANGE);
4045                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4046                 hba->ufs_stats.hibern8_exit_cnt++;
4047         }
4048
4049         return ret;
4050 }
4051 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4052
4053 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4054 {
4055         unsigned long flags;
4056         bool update = false;
4057
4058         if (!ufshcd_is_auto_hibern8_supported(hba))
4059                 return;
4060
4061         spin_lock_irqsave(hba->host->host_lock, flags);
4062         if (hba->ahit != ahit) {
4063                 hba->ahit = ahit;
4064                 update = true;
4065         }
4066         spin_unlock_irqrestore(hba->host->host_lock, flags);
4067
4068         if (update && !pm_runtime_suspended(hba->dev)) {
4069                 pm_runtime_get_sync(hba->dev);
4070                 ufshcd_hold(hba, false);
4071                 ufshcd_auto_hibern8_enable(hba);
4072                 ufshcd_release(hba);
4073                 pm_runtime_put(hba->dev);
4074         }
4075 }
4076 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4077
4078 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4079 {
4080         unsigned long flags;
4081
4082         if (!ufshcd_is_auto_hibern8_supported(hba))
4083                 return;
4084
4085         spin_lock_irqsave(hba->host->host_lock, flags);
4086         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4087         spin_unlock_irqrestore(hba->host->host_lock, flags);
4088 }
4089
4090  /**
4091  * ufshcd_init_pwr_info - setting the POR (power on reset)
4092  * values in hba power info
4093  * @hba: per-adapter instance
4094  */
4095 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4096 {
4097         hba->pwr_info.gear_rx = UFS_PWM_G1;
4098         hba->pwr_info.gear_tx = UFS_PWM_G1;
4099         hba->pwr_info.lane_rx = 1;
4100         hba->pwr_info.lane_tx = 1;
4101         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4102         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4103         hba->pwr_info.hs_rate = 0;
4104 }
4105
4106 /**
4107  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4108  * @hba: per-adapter instance
4109  */
4110 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4111 {
4112         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4113
4114         if (hba->max_pwr_info.is_valid)
4115                 return 0;
4116
4117         pwr_info->pwr_tx = FAST_MODE;
4118         pwr_info->pwr_rx = FAST_MODE;
4119         pwr_info->hs_rate = PA_HS_MODE_B;
4120
4121         /* Get the connected lane count */
4122         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4123                         &pwr_info->lane_rx);
4124         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4125                         &pwr_info->lane_tx);
4126
4127         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4128                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4129                                 __func__,
4130                                 pwr_info->lane_rx,
4131                                 pwr_info->lane_tx);
4132                 return -EINVAL;
4133         }
4134
4135         /*
4136          * First, get the maximum gears of HS speed.
4137          * If a zero value, it means there is no HSGEAR capability.
4138          * Then, get the maximum gears of PWM speed.
4139          */
4140         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4141         if (!pwr_info->gear_rx) {
4142                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4143                                 &pwr_info->gear_rx);
4144                 if (!pwr_info->gear_rx) {
4145                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4146                                 __func__, pwr_info->gear_rx);
4147                         return -EINVAL;
4148                 }
4149                 pwr_info->pwr_rx = SLOW_MODE;
4150         }
4151
4152         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4153                         &pwr_info->gear_tx);
4154         if (!pwr_info->gear_tx) {
4155                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4156                                 &pwr_info->gear_tx);
4157                 if (!pwr_info->gear_tx) {
4158                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4159                                 __func__, pwr_info->gear_tx);
4160                         return -EINVAL;
4161                 }
4162                 pwr_info->pwr_tx = SLOW_MODE;
4163         }
4164
4165         hba->max_pwr_info.is_valid = true;
4166         return 0;
4167 }
4168
4169 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4170                              struct ufs_pa_layer_attr *pwr_mode)
4171 {
4172         int ret;
4173
4174         /* if already configured to the requested pwr_mode */
4175         if (!hba->force_pmc &&
4176             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4177             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4178             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4179             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4180             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4181             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4182             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4183                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4184                 return 0;
4185         }
4186
4187         /*
4188          * Configure attributes for power mode change with below.
4189          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4190          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4191          * - PA_HSSERIES
4192          */
4193         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4194         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4195                         pwr_mode->lane_rx);
4196         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4197                         pwr_mode->pwr_rx == FAST_MODE)
4198                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4199         else
4200                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4201
4202         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4203         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4204                         pwr_mode->lane_tx);
4205         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4206                         pwr_mode->pwr_tx == FAST_MODE)
4207                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4208         else
4209                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4210
4211         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4212             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4213             pwr_mode->pwr_rx == FAST_MODE ||
4214             pwr_mode->pwr_tx == FAST_MODE)
4215                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4216                                                 pwr_mode->hs_rate);
4217
4218         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4219                         DL_FC0ProtectionTimeOutVal_Default);
4220         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4221                         DL_TC0ReplayTimeOutVal_Default);
4222         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4223                         DL_AFC0ReqTimeOutVal_Default);
4224         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4225                         DL_FC1ProtectionTimeOutVal_Default);
4226         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4227                         DL_TC1ReplayTimeOutVal_Default);
4228         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4229                         DL_AFC1ReqTimeOutVal_Default);
4230
4231         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4232                         DL_FC0ProtectionTimeOutVal_Default);
4233         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4234                         DL_TC0ReplayTimeOutVal_Default);
4235         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4236                         DL_AFC0ReqTimeOutVal_Default);
4237
4238         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4239                         | pwr_mode->pwr_tx);
4240
4241         if (ret) {
4242                 dev_err(hba->dev,
4243                         "%s: power mode change failed %d\n", __func__, ret);
4244         } else {
4245                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4246                                                                 pwr_mode);
4247
4248                 memcpy(&hba->pwr_info, pwr_mode,
4249                         sizeof(struct ufs_pa_layer_attr));
4250         }
4251
4252         return ret;
4253 }
4254
4255 /**
4256  * ufshcd_config_pwr_mode - configure a new power mode
4257  * @hba: per-adapter instance
4258  * @desired_pwr_mode: desired power configuration
4259  */
4260 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4261                 struct ufs_pa_layer_attr *desired_pwr_mode)
4262 {
4263         struct ufs_pa_layer_attr final_params = { 0 };
4264         int ret;
4265
4266         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4267                                         desired_pwr_mode, &final_params);
4268
4269         if (ret)
4270                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4271
4272         ret = ufshcd_change_power_mode(hba, &final_params);
4273
4274         return ret;
4275 }
4276 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4277
4278 /**
4279  * ufshcd_complete_dev_init() - checks device readiness
4280  * @hba: per-adapter instance
4281  *
4282  * Set fDeviceInit flag and poll until device toggles it.
4283  */
4284 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4285 {
4286         int err;
4287         bool flag_res = true;
4288         ktime_t timeout;
4289
4290         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4291                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4292         if (err) {
4293                 dev_err(hba->dev,
4294                         "%s setting fDeviceInit flag failed with error %d\n",
4295                         __func__, err);
4296                 goto out;
4297         }
4298
4299         /* Poll fDeviceInit flag to be cleared */
4300         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4301         do {
4302                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4303                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4304                 if (!flag_res)
4305                         break;
4306                 usleep_range(5000, 10000);
4307         } while (ktime_before(ktime_get(), timeout));
4308
4309         if (err) {
4310                 dev_err(hba->dev,
4311                                 "%s reading fDeviceInit flag failed with error %d\n",
4312                                 __func__, err);
4313         } else if (flag_res) {
4314                 dev_err(hba->dev,
4315                                 "%s fDeviceInit was not cleared by the device\n",
4316                                 __func__);
4317                 err = -EBUSY;
4318         }
4319 out:
4320         return err;
4321 }
4322
4323 /**
4324  * ufshcd_make_hba_operational - Make UFS controller operational
4325  * @hba: per adapter instance
4326  *
4327  * To bring UFS host controller to operational state,
4328  * 1. Enable required interrupts
4329  * 2. Configure interrupt aggregation
4330  * 3. Program UTRL and UTMRL base address
4331  * 4. Configure run-stop-registers
4332  *
4333  * Returns 0 on success, non-zero value on failure
4334  */
4335 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4336 {
4337         int err = 0;
4338         u32 reg;
4339
4340         /* Enable required interrupts */
4341         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4342
4343         /* Configure interrupt aggregation */
4344         if (ufshcd_is_intr_aggr_allowed(hba))
4345                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4346         else
4347                 ufshcd_disable_intr_aggr(hba);
4348
4349         /* Configure UTRL and UTMRL base address registers */
4350         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4351                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4352         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4353                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4354         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4355                         REG_UTP_TASK_REQ_LIST_BASE_L);
4356         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4357                         REG_UTP_TASK_REQ_LIST_BASE_H);
4358
4359         /*
4360          * Make sure base address and interrupt setup are updated before
4361          * enabling the run/stop registers below.
4362          */
4363         wmb();
4364
4365         /*
4366          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4367          */
4368         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4369         if (!(ufshcd_get_lists_status(reg))) {
4370                 ufshcd_enable_run_stop_reg(hba);
4371         } else {
4372                 dev_err(hba->dev,
4373                         "Host controller not ready to process requests");
4374                 err = -EIO;
4375         }
4376
4377         return err;
4378 }
4379 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4380
4381 /**
4382  * ufshcd_hba_stop - Send controller to reset state
4383  * @hba: per adapter instance
4384  */
4385 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4386 {
4387         unsigned long flags;
4388         int err;
4389
4390         /*
4391          * Obtain the host lock to prevent that the controller is disabled
4392          * while the UFS interrupt handler is active on another CPU.
4393          */
4394         spin_lock_irqsave(hba->host->host_lock, flags);
4395         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4396         spin_unlock_irqrestore(hba->host->host_lock, flags);
4397
4398         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4399                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4400                                         10, 1);
4401         if (err)
4402                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4403 }
4404
4405 /**
4406  * ufshcd_hba_execute_hce - initialize the controller
4407  * @hba: per adapter instance
4408  *
4409  * The controller resets itself and controller firmware initialization
4410  * sequence kicks off. When controller is ready it will set
4411  * the Host Controller Enable bit to 1.
4412  *
4413  * Returns 0 on success, non-zero value on failure
4414  */
4415 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4416 {
4417         int retry_outer = 3;
4418         int retry_inner;
4419
4420 start:
4421         if (!ufshcd_is_hba_active(hba))
4422                 /* change controller state to "reset state" */
4423                 ufshcd_hba_stop(hba);
4424
4425         /* UniPro link is disabled at this point */
4426         ufshcd_set_link_off(hba);
4427
4428         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4429
4430         /* start controller initialization sequence */
4431         ufshcd_hba_start(hba);
4432
4433         /*
4434          * To initialize a UFS host controller HCE bit must be set to 1.
4435          * During initialization the HCE bit value changes from 1->0->1.
4436          * When the host controller completes initialization sequence
4437          * it sets the value of HCE bit to 1. The same HCE bit is read back
4438          * to check if the controller has completed initialization sequence.
4439          * So without this delay the value HCE = 1, set in the previous
4440          * instruction might be read back.
4441          * This delay can be changed based on the controller.
4442          */
4443         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4444
4445         /* wait for the host controller to complete initialization */
4446         retry_inner = 50;
4447         while (ufshcd_is_hba_active(hba)) {
4448                 if (retry_inner) {
4449                         retry_inner--;
4450                 } else {
4451                         dev_err(hba->dev,
4452                                 "Controller enable failed\n");
4453                         if (retry_outer) {
4454                                 retry_outer--;
4455                                 goto start;
4456                         }
4457                         return -EIO;
4458                 }
4459                 usleep_range(1000, 1100);
4460         }
4461
4462         /* enable UIC related interrupts */
4463         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4464
4465         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4466
4467         return 0;
4468 }
4469
4470 int ufshcd_hba_enable(struct ufs_hba *hba)
4471 {
4472         int ret;
4473
4474         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4475                 ufshcd_set_link_off(hba);
4476                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4477
4478                 /* enable UIC related interrupts */
4479                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4480                 ret = ufshcd_dme_reset(hba);
4481                 if (!ret) {
4482                         ret = ufshcd_dme_enable(hba);
4483                         if (!ret)
4484                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4485                         if (ret)
4486                                 dev_err(hba->dev,
4487                                         "Host controller enable failed with non-hce\n");
4488                 }
4489         } else {
4490                 ret = ufshcd_hba_execute_hce(hba);
4491         }
4492
4493         return ret;
4494 }
4495 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4496
4497 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4498 {
4499         int tx_lanes = 0, i, err = 0;
4500
4501         if (!peer)
4502                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4503                                &tx_lanes);
4504         else
4505                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4506                                     &tx_lanes);
4507         for (i = 0; i < tx_lanes; i++) {
4508                 if (!peer)
4509                         err = ufshcd_dme_set(hba,
4510                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4511                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4512                                         0);
4513                 else
4514                         err = ufshcd_dme_peer_set(hba,
4515                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4516                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4517                                         0);
4518                 if (err) {
4519                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4520                                 __func__, peer, i, err);
4521                         break;
4522                 }
4523         }
4524
4525         return err;
4526 }
4527
4528 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4529 {
4530         return ufshcd_disable_tx_lcc(hba, true);
4531 }
4532
4533 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4534 {
4535         struct ufs_event_hist *e;
4536
4537         if (id >= UFS_EVT_CNT)
4538                 return;
4539
4540         e = &hba->ufs_stats.event[id];
4541         e->val[e->pos] = val;
4542         e->tstamp[e->pos] = ktime_get();
4543         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4544
4545         ufshcd_vops_event_notify(hba, id, &val);
4546 }
4547 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4548
4549 /**
4550  * ufshcd_link_startup - Initialize unipro link startup
4551  * @hba: per adapter instance
4552  *
4553  * Returns 0 for success, non-zero in case of failure
4554  */
4555 static int ufshcd_link_startup(struct ufs_hba *hba)
4556 {
4557         int ret;
4558         int retries = DME_LINKSTARTUP_RETRIES;
4559         bool link_startup_again = false;
4560
4561         /*
4562          * If UFS device isn't active then we will have to issue link startup
4563          * 2 times to make sure the device state move to active.
4564          */
4565         if (!ufshcd_is_ufs_dev_active(hba))
4566                 link_startup_again = true;
4567
4568 link_startup:
4569         do {
4570                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4571
4572                 ret = ufshcd_dme_link_startup(hba);
4573
4574                 /* check if device is detected by inter-connect layer */
4575                 if (!ret && !ufshcd_is_device_present(hba)) {
4576                         ufshcd_update_evt_hist(hba,
4577                                                UFS_EVT_LINK_STARTUP_FAIL,
4578                                                0);
4579                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4580                         ret = -ENXIO;
4581                         goto out;
4582                 }
4583
4584                 /*
4585                  * DME link lost indication is only received when link is up,
4586                  * but we can't be sure if the link is up until link startup
4587                  * succeeds. So reset the local Uni-Pro and try again.
4588                  */
4589                 if (ret && ufshcd_hba_enable(hba)) {
4590                         ufshcd_update_evt_hist(hba,
4591                                                UFS_EVT_LINK_STARTUP_FAIL,
4592                                                (u32)ret);
4593                         goto out;
4594                 }
4595         } while (ret && retries--);
4596
4597         if (ret) {
4598                 /* failed to get the link up... retire */
4599                 ufshcd_update_evt_hist(hba,
4600                                        UFS_EVT_LINK_STARTUP_FAIL,
4601                                        (u32)ret);
4602                 goto out;
4603         }
4604
4605         if (link_startup_again) {
4606                 link_startup_again = false;
4607                 retries = DME_LINKSTARTUP_RETRIES;
4608                 goto link_startup;
4609         }
4610
4611         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4612         ufshcd_init_pwr_info(hba);
4613         ufshcd_print_pwr_info(hba);
4614
4615         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4616                 ret = ufshcd_disable_device_tx_lcc(hba);
4617                 if (ret)
4618                         goto out;
4619         }
4620
4621         /* Include any host controller configuration via UIC commands */
4622         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4623         if (ret)
4624                 goto out;
4625
4626         /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4627         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4628         ret = ufshcd_make_hba_operational(hba);
4629 out:
4630         if (ret) {
4631                 dev_err(hba->dev, "link startup failed %d\n", ret);
4632                 ufshcd_print_host_state(hba);
4633                 ufshcd_print_pwr_info(hba);
4634                 ufshcd_print_evt_hist(hba);
4635         }
4636         return ret;
4637 }
4638
4639 /**
4640  * ufshcd_verify_dev_init() - Verify device initialization
4641  * @hba: per-adapter instance
4642  *
4643  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4644  * device Transport Protocol (UTP) layer is ready after a reset.
4645  * If the UTP layer at the device side is not initialized, it may
4646  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4647  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4648  */
4649 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4650 {
4651         int err = 0;
4652         int retries;
4653
4654         ufshcd_hold(hba, false);
4655         mutex_lock(&hba->dev_cmd.lock);
4656         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4657                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4658                                                NOP_OUT_TIMEOUT);
4659
4660                 if (!err || err == -ETIMEDOUT)
4661                         break;
4662
4663                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4664         }
4665         mutex_unlock(&hba->dev_cmd.lock);
4666         ufshcd_release(hba);
4667
4668         if (err)
4669                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4670         return err;
4671 }
4672
4673 /**
4674  * ufshcd_set_queue_depth - set lun queue depth
4675  * @sdev: pointer to SCSI device
4676  *
4677  * Read bLUQueueDepth value and activate scsi tagged command
4678  * queueing. For WLUN, queue depth is set to 1. For best-effort
4679  * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4680  * value that host can queue.
4681  */
4682 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4683 {
4684         int ret = 0;
4685         u8 lun_qdepth;
4686         struct ufs_hba *hba;
4687
4688         hba = shost_priv(sdev->host);
4689
4690         lun_qdepth = hba->nutrs;
4691         ret = ufshcd_read_unit_desc_param(hba,
4692                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4693                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4694                                           &lun_qdepth,
4695                                           sizeof(lun_qdepth));
4696
4697         /* Some WLUN doesn't support unit descriptor */
4698         if (ret == -EOPNOTSUPP)
4699                 lun_qdepth = 1;
4700         else if (!lun_qdepth)
4701                 /* eventually, we can figure out the real queue depth */
4702                 lun_qdepth = hba->nutrs;
4703         else
4704                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4705
4706         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4707                         __func__, lun_qdepth);
4708         scsi_change_queue_depth(sdev, lun_qdepth);
4709 }
4710
4711 /*
4712  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4713  * @hba: per-adapter instance
4714  * @lun: UFS device lun id
4715  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4716  *
4717  * Returns 0 in case of success and b_lu_write_protect status would be returned
4718  * @b_lu_write_protect parameter.
4719  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4720  * Returns -EINVAL in case of invalid parameters passed to this function.
4721  */
4722 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4723                             u8 lun,
4724                             u8 *b_lu_write_protect)
4725 {
4726         int ret;
4727
4728         if (!b_lu_write_protect)
4729                 ret = -EINVAL;
4730         /*
4731          * According to UFS device spec, RPMB LU can't be write
4732          * protected so skip reading bLUWriteProtect parameter for
4733          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4734          */
4735         else if (lun >= hba->dev_info.max_lu_supported)
4736                 ret = -ENOTSUPP;
4737         else
4738                 ret = ufshcd_read_unit_desc_param(hba,
4739                                           lun,
4740                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4741                                           b_lu_write_protect,
4742                                           sizeof(*b_lu_write_protect));
4743         return ret;
4744 }
4745
4746 /**
4747  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4748  * status
4749  * @hba: per-adapter instance
4750  * @sdev: pointer to SCSI device
4751  *
4752  */
4753 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4754                                                     struct scsi_device *sdev)
4755 {
4756         if (hba->dev_info.f_power_on_wp_en &&
4757             !hba->dev_info.is_lu_power_on_wp) {
4758                 u8 b_lu_write_protect;
4759
4760                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4761                                       &b_lu_write_protect) &&
4762                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4763                         hba->dev_info.is_lu_power_on_wp = true;
4764         }
4765 }
4766
4767 /**
4768  * ufshcd_slave_alloc - handle initial SCSI device configurations
4769  * @sdev: pointer to SCSI device
4770  *
4771  * Returns success
4772  */
4773 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4774 {
4775         struct ufs_hba *hba;
4776
4777         hba = shost_priv(sdev->host);
4778
4779         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4780         sdev->use_10_for_ms = 1;
4781
4782         /* DBD field should be set to 1 in mode sense(10) */
4783         sdev->set_dbd_for_ms = 1;
4784
4785         /* allow SCSI layer to restart the device in case of errors */
4786         sdev->allow_restart = 1;
4787
4788         /* REPORT SUPPORTED OPERATION CODES is not supported */
4789         sdev->no_report_opcodes = 1;
4790
4791         /* WRITE_SAME command is not supported */
4792         sdev->no_write_same = 1;
4793
4794         ufshcd_set_queue_depth(sdev);
4795
4796         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4797
4798         return 0;
4799 }
4800
4801 /**
4802  * ufshcd_change_queue_depth - change queue depth
4803  * @sdev: pointer to SCSI device
4804  * @depth: required depth to set
4805  *
4806  * Change queue depth and make sure the max. limits are not crossed.
4807  */
4808 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4809 {
4810         struct ufs_hba *hba = shost_priv(sdev->host);
4811
4812         if (depth > hba->nutrs)
4813                 depth = hba->nutrs;
4814         return scsi_change_queue_depth(sdev, depth);
4815 }
4816
4817 /**
4818  * ufshcd_slave_configure - adjust SCSI device configurations
4819  * @sdev: pointer to SCSI device
4820  */
4821 static int ufshcd_slave_configure(struct scsi_device *sdev)
4822 {
4823         struct ufs_hba *hba = shost_priv(sdev->host);
4824         struct request_queue *q = sdev->request_queue;
4825
4826         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4827
4828         if (ufshcd_is_rpm_autosuspend_allowed(hba))
4829                 sdev->rpm_autosuspend = 1;
4830
4831         ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4832
4833         return 0;
4834 }
4835
4836 /**
4837  * ufshcd_slave_destroy - remove SCSI device configurations
4838  * @sdev: pointer to SCSI device
4839  */
4840 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4841 {
4842         struct ufs_hba *hba;
4843
4844         hba = shost_priv(sdev->host);
4845         /* Drop the reference as it won't be needed anymore */
4846         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4847                 unsigned long flags;
4848
4849                 spin_lock_irqsave(hba->host->host_lock, flags);
4850                 hba->sdev_ufs_device = NULL;
4851                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4852         }
4853 }
4854
4855 /**
4856  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4857  * @lrbp: pointer to local reference block of completed command
4858  * @scsi_status: SCSI command status
4859  *
4860  * Returns value base on SCSI command status
4861  */
4862 static inline int
4863 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4864 {
4865         int result = 0;
4866
4867         switch (scsi_status) {
4868         case SAM_STAT_CHECK_CONDITION:
4869                 ufshcd_copy_sense_data(lrbp);
4870                 fallthrough;
4871         case SAM_STAT_GOOD:
4872                 result |= DID_OK << 16 |
4873                           COMMAND_COMPLETE << 8 |
4874                           scsi_status;
4875                 break;
4876         case SAM_STAT_TASK_SET_FULL:
4877         case SAM_STAT_BUSY:
4878         case SAM_STAT_TASK_ABORTED:
4879                 ufshcd_copy_sense_data(lrbp);
4880                 result |= scsi_status;
4881                 break;
4882         default:
4883                 result |= DID_ERROR << 16;
4884                 break;
4885         } /* end of switch */
4886
4887         return result;
4888 }
4889
4890 /**
4891  * ufshcd_transfer_rsp_status - Get overall status of the response
4892  * @hba: per adapter instance
4893  * @lrbp: pointer to local reference block of completed command
4894  *
4895  * Returns result of the command to notify SCSI midlayer
4896  */
4897 static inline int
4898 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4899 {
4900         int result = 0;
4901         int scsi_status;
4902         int ocs;
4903
4904         /* overall command status of utrd */
4905         ocs = ufshcd_get_tr_ocs(lrbp);
4906
4907         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4908                 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
4909                                         MASK_RSP_UPIU_RESULT)
4910                         ocs = OCS_SUCCESS;
4911         }
4912
4913         switch (ocs) {
4914         case OCS_SUCCESS:
4915                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4916                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4917                 switch (result) {
4918                 case UPIU_TRANSACTION_RESPONSE:
4919                         /*
4920                          * get the response UPIU result to extract
4921                          * the SCSI command status
4922                          */
4923                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4924
4925                         /*
4926                          * get the result based on SCSI status response
4927                          * to notify the SCSI midlayer of the command status
4928                          */
4929                         scsi_status = result & MASK_SCSI_STATUS;
4930                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4931
4932                         /*
4933                          * Currently we are only supporting BKOPs exception
4934                          * events hence we can ignore BKOPs exception event
4935                          * during power management callbacks. BKOPs exception
4936                          * event is not expected to be raised in runtime suspend
4937                          * callback as it allows the urgent bkops.
4938                          * During system suspend, we are anyway forcefully
4939                          * disabling the bkops and if urgent bkops is needed
4940                          * it will be enabled on system resume. Long term
4941                          * solution could be to abort the system suspend if
4942                          * UFS device needs urgent BKOPs.
4943                          */
4944                         if (!hba->pm_op_in_progress &&
4945                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4946                             schedule_work(&hba->eeh_work)) {
4947                                 /*
4948                                  * Prevent suspend once eeh_work is scheduled
4949                                  * to avoid deadlock between ufshcd_suspend
4950                                  * and exception event handler.
4951                                  */
4952                                 pm_runtime_get_noresume(hba->dev);
4953                         }
4954                         break;
4955                 case UPIU_TRANSACTION_REJECT_UPIU:
4956                         /* TODO: handle Reject UPIU Response */
4957                         result = DID_ERROR << 16;
4958                         dev_err(hba->dev,
4959                                 "Reject UPIU not fully implemented\n");
4960                         break;
4961                 default:
4962                         dev_err(hba->dev,
4963                                 "Unexpected request response code = %x\n",
4964                                 result);
4965                         result = DID_ERROR << 16;
4966                         break;
4967                 }
4968                 break;
4969         case OCS_ABORTED:
4970                 result |= DID_ABORT << 16;
4971                 break;
4972         case OCS_INVALID_COMMAND_STATUS:
4973                 result |= DID_REQUEUE << 16;
4974                 break;
4975         case OCS_INVALID_CMD_TABLE_ATTR:
4976         case OCS_INVALID_PRDT_ATTR:
4977         case OCS_MISMATCH_DATA_BUF_SIZE:
4978         case OCS_MISMATCH_RESP_UPIU_SIZE:
4979         case OCS_PEER_COMM_FAILURE:
4980         case OCS_FATAL_ERROR:
4981         case OCS_DEVICE_FATAL_ERROR:
4982         case OCS_INVALID_CRYPTO_CONFIG:
4983         case OCS_GENERAL_CRYPTO_ERROR:
4984         default:
4985                 result |= DID_ERROR << 16;
4986                 dev_err(hba->dev,
4987                                 "OCS error from controller = %x for tag %d\n",
4988                                 ocs, lrbp->task_tag);
4989                 ufshcd_print_evt_hist(hba);
4990                 ufshcd_print_host_state(hba);
4991                 break;
4992         } /* end of switch */
4993
4994         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4995                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4996         return result;
4997 }
4998
4999 /**
5000  * ufshcd_uic_cmd_compl - handle completion of uic command
5001  * @hba: per adapter instance
5002  * @intr_status: interrupt status generated by the controller
5003  *
5004  * Returns
5005  *  IRQ_HANDLED - If interrupt is valid
5006  *  IRQ_NONE    - If invalid interrupt
5007  */
5008 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5009 {
5010         irqreturn_t retval = IRQ_NONE;
5011
5012         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5013                 hba->active_uic_cmd->argument2 |=
5014                         ufshcd_get_uic_cmd_result(hba);
5015                 hba->active_uic_cmd->argument3 =
5016                         ufshcd_get_dme_attr_val(hba);
5017                 if (!hba->uic_async_done)
5018                         hba->active_uic_cmd->cmd_active = 0;
5019                 complete(&hba->active_uic_cmd->done);
5020                 retval = IRQ_HANDLED;
5021         }
5022
5023         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5024                 hba->active_uic_cmd->cmd_active = 0;
5025                 complete(hba->uic_async_done);
5026                 retval = IRQ_HANDLED;
5027         }
5028
5029         if (retval == IRQ_HANDLED)
5030                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5031                                              "complete");
5032         return retval;
5033 }
5034
5035 /**
5036  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5037  * @hba: per adapter instance
5038  * @completed_reqs: requests to complete
5039  */
5040 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5041                                         unsigned long completed_reqs)
5042 {
5043         struct ufshcd_lrb *lrbp;
5044         struct scsi_cmnd *cmd;
5045         int result;
5046         int index;
5047         bool update_scaling = false;
5048
5049         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5050                 lrbp = &hba->lrb[index];
5051                 lrbp->in_use = false;
5052                 lrbp->compl_time_stamp = ktime_get();
5053                 cmd = lrbp->cmd;
5054                 if (cmd) {
5055                         ufshcd_add_command_trace(hba, index, "complete");
5056                         result = ufshcd_transfer_rsp_status(hba, lrbp);
5057                         scsi_dma_unmap(cmd);
5058                         cmd->result = result;
5059                         /* Mark completed command as NULL in LRB */
5060                         lrbp->cmd = NULL;
5061                         /* Do not touch lrbp after scsi done */
5062                         cmd->scsi_done(cmd);
5063                         __ufshcd_release(hba);
5064                         update_scaling = true;
5065                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5066                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5067                         if (hba->dev_cmd.complete) {
5068                                 ufshcd_add_command_trace(hba, index,
5069                                                 "dev_complete");
5070                                 complete(hba->dev_cmd.complete);
5071                                 update_scaling = true;
5072                         }
5073                 }
5074                 if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
5075                         hba->clk_scaling.active_reqs--;
5076         }
5077
5078         /* clear corresponding bits of completed commands */
5079         hba->outstanding_reqs ^= completed_reqs;
5080
5081         ufshcd_clk_scaling_update_busy(hba);
5082 }
5083
5084 /**
5085  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5086  * @hba: per adapter instance
5087  *
5088  * Returns
5089  *  IRQ_HANDLED - If interrupt is valid
5090  *  IRQ_NONE    - If invalid interrupt
5091  */
5092 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5093 {
5094         unsigned long completed_reqs;
5095         u32 tr_doorbell;
5096
5097         /* Resetting interrupt aggregation counters first and reading the
5098          * DOOR_BELL afterward allows us to handle all the completed requests.
5099          * In order to prevent other interrupts starvation the DB is read once
5100          * after reset. The down side of this solution is the possibility of
5101          * false interrupt if device completes another request after resetting
5102          * aggregation and before reading the DB.
5103          */
5104         if (ufshcd_is_intr_aggr_allowed(hba) &&
5105             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5106                 ufshcd_reset_intr_aggr(hba);
5107
5108         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5109         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5110
5111         if (completed_reqs) {
5112                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5113                 return IRQ_HANDLED;
5114         } else {
5115                 return IRQ_NONE;
5116         }
5117 }
5118
5119 /**
5120  * ufshcd_disable_ee - disable exception event
5121  * @hba: per-adapter instance
5122  * @mask: exception event to disable
5123  *
5124  * Disables exception event in the device so that the EVENT_ALERT
5125  * bit is not set.
5126  *
5127  * Returns zero on success, non-zero error value on failure.
5128  */
5129 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5130 {
5131         int err = 0;
5132         u32 val;
5133
5134         if (!(hba->ee_ctrl_mask & mask))
5135                 goto out;
5136
5137         val = hba->ee_ctrl_mask & ~mask;
5138         val &= MASK_EE_STATUS;
5139         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5140                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5141         if (!err)
5142                 hba->ee_ctrl_mask &= ~mask;
5143 out:
5144         return err;
5145 }
5146
5147 /**
5148  * ufshcd_enable_ee - enable exception event
5149  * @hba: per-adapter instance
5150  * @mask: exception event to enable
5151  *
5152  * Enable corresponding exception event in the device to allow
5153  * device to alert host in critical scenarios.
5154  *
5155  * Returns zero on success, non-zero error value on failure.
5156  */
5157 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5158 {
5159         int err = 0;
5160         u32 val;
5161
5162         if (hba->ee_ctrl_mask & mask)
5163                 goto out;
5164
5165         val = hba->ee_ctrl_mask | mask;
5166         val &= MASK_EE_STATUS;
5167         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5168                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5169         if (!err)
5170                 hba->ee_ctrl_mask |= mask;
5171 out:
5172         return err;
5173 }
5174
5175 /**
5176  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5177  * @hba: per-adapter instance
5178  *
5179  * Allow device to manage background operations on its own. Enabling
5180  * this might lead to inconsistent latencies during normal data transfers
5181  * as the device is allowed to manage its own way of handling background
5182  * operations.
5183  *
5184  * Returns zero on success, non-zero on failure.
5185  */
5186 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5187 {
5188         int err = 0;
5189
5190         if (hba->auto_bkops_enabled)
5191                 goto out;
5192
5193         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5194                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5195         if (err) {
5196                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5197                                 __func__, err);
5198                 goto out;
5199         }
5200
5201         hba->auto_bkops_enabled = true;
5202         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5203
5204         /* No need of URGENT_BKOPS exception from the device */
5205         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5206         if (err)
5207                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5208                                 __func__, err);
5209 out:
5210         return err;
5211 }
5212
5213 /**
5214  * ufshcd_disable_auto_bkops - block device in doing background operations
5215  * @hba: per-adapter instance
5216  *
5217  * Disabling background operations improves command response latency but
5218  * has drawback of device moving into critical state where the device is
5219  * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5220  * host is idle so that BKOPS are managed effectively without any negative
5221  * impacts.
5222  *
5223  * Returns zero on success, non-zero on failure.
5224  */
5225 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5226 {
5227         int err = 0;
5228
5229         if (!hba->auto_bkops_enabled)
5230                 goto out;
5231
5232         /*
5233          * If host assisted BKOPs is to be enabled, make sure
5234          * urgent bkops exception is allowed.
5235          */
5236         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5237         if (err) {
5238                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5239                                 __func__, err);
5240                 goto out;
5241         }
5242
5243         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5244                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5245         if (err) {
5246                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5247                                 __func__, err);
5248                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5249                 goto out;
5250         }
5251
5252         hba->auto_bkops_enabled = false;
5253         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5254         hba->is_urgent_bkops_lvl_checked = false;
5255 out:
5256         return err;
5257 }
5258
5259 /**
5260  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5261  * @hba: per adapter instance
5262  *
5263  * After a device reset the device may toggle the BKOPS_EN flag
5264  * to default value. The s/w tracking variables should be updated
5265  * as well. This function would change the auto-bkops state based on
5266  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5267  */
5268 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5269 {
5270         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5271                 hba->auto_bkops_enabled = false;
5272                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5273                 ufshcd_enable_auto_bkops(hba);
5274         } else {
5275                 hba->auto_bkops_enabled = true;
5276                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5277                 ufshcd_disable_auto_bkops(hba);
5278         }
5279         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5280         hba->is_urgent_bkops_lvl_checked = false;
5281 }
5282
5283 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5284 {
5285         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5286                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5287 }
5288
5289 /**
5290  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5291  * @hba: per-adapter instance
5292  * @status: bkops_status value
5293  *
5294  * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5295  * flag in the device to permit background operations if the device
5296  * bkops_status is greater than or equal to "status" argument passed to
5297  * this function, disable otherwise.
5298  *
5299  * Returns 0 for success, non-zero in case of failure.
5300  *
5301  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5302  * to know whether auto bkops is enabled or disabled after this function
5303  * returns control to it.
5304  */
5305 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5306                              enum bkops_status status)
5307 {
5308         int err;
5309         u32 curr_status = 0;
5310
5311         err = ufshcd_get_bkops_status(hba, &curr_status);
5312         if (err) {
5313                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5314                                 __func__, err);
5315                 goto out;
5316         } else if (curr_status > BKOPS_STATUS_MAX) {
5317                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5318                                 __func__, curr_status);
5319                 err = -EINVAL;
5320                 goto out;
5321         }
5322
5323         if (curr_status >= status)
5324                 err = ufshcd_enable_auto_bkops(hba);
5325         else
5326                 err = ufshcd_disable_auto_bkops(hba);
5327 out:
5328         return err;
5329 }
5330
5331 /**
5332  * ufshcd_urgent_bkops - handle urgent bkops exception event
5333  * @hba: per-adapter instance
5334  *
5335  * Enable fBackgroundOpsEn flag in the device to permit background
5336  * operations.
5337  *
5338  * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
5339  * and negative error value for any other failure.
5340  */
5341 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5342 {
5343         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5344 }
5345
5346 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5347 {
5348         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5349                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5350 }
5351
5352 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5353 {
5354         int err;
5355         u32 curr_status = 0;
5356
5357         if (hba->is_urgent_bkops_lvl_checked)
5358                 goto enable_auto_bkops;
5359
5360         err = ufshcd_get_bkops_status(hba, &curr_status);
5361         if (err) {
5362                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5363                                 __func__, err);
5364                 goto out;
5365         }
5366
5367         /*
5368          * We are seeing that some devices are raising the urgent bkops
5369          * exception events even when BKOPS status doesn't indicate performace
5370          * impacted or critical. Handle these device by determining their urgent
5371          * bkops status at runtime.
5372          */
5373         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5374                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5375                                 __func__, curr_status);
5376                 /* update the current status as the urgent bkops level */
5377                 hba->urgent_bkops_lvl = curr_status;
5378                 hba->is_urgent_bkops_lvl_checked = true;
5379         }
5380
5381 enable_auto_bkops:
5382         err = ufshcd_enable_auto_bkops(hba);
5383 out:
5384         if (err < 0)
5385                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5386                                 __func__, err);
5387 }
5388
5389 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5390 {
5391         int ret;
5392         u8 index;
5393         enum query_opcode opcode;
5394
5395         if (!ufshcd_is_wb_allowed(hba))
5396                 return 0;
5397
5398         if (!(enable ^ hba->wb_enabled))
5399                 return 0;
5400         if (enable)
5401                 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5402         else
5403                 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5404
5405         index = ufshcd_wb_get_query_index(hba);
5406         ret = ufshcd_query_flag_retry(hba, opcode,
5407                                       QUERY_FLAG_IDN_WB_EN, index, NULL);
5408         if (ret) {
5409                 dev_err(hba->dev, "%s write booster %s failed %d\n",
5410                         __func__, enable ? "enable" : "disable", ret);
5411                 return ret;
5412         }
5413
5414         hba->wb_enabled = enable;
5415         dev_dbg(hba->dev, "%s write booster %s %d\n",
5416                         __func__, enable ? "enable" : "disable", ret);
5417
5418         return ret;
5419 }
5420
5421 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5422 {
5423         int val;
5424         u8 index;
5425
5426         if (set)
5427                 val =  UPIU_QUERY_OPCODE_SET_FLAG;
5428         else
5429                 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5430
5431         index = ufshcd_wb_get_query_index(hba);
5432         return ufshcd_query_flag_retry(hba, val,
5433                                 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5434                                 index, NULL);
5435 }
5436
5437 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5438 {
5439         if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
5440                 return;
5441
5442         if (enable)
5443                 ufshcd_wb_buf_flush_enable(hba);
5444         else
5445                 ufshcd_wb_buf_flush_disable(hba);
5446
5447 }
5448
5449 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5450 {
5451         int ret;
5452         u8 index;
5453
5454         if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5455                 return 0;
5456
5457         index = ufshcd_wb_get_query_index(hba);
5458         ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5459                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5460                                       index, NULL);
5461         if (ret)
5462                 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5463                         __func__, ret);
5464         else
5465                 hba->wb_buf_flush_enabled = true;
5466
5467         dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5468         return ret;
5469 }
5470
5471 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5472 {
5473         int ret;
5474         u8 index;
5475
5476         if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5477                 return 0;
5478
5479         index = ufshcd_wb_get_query_index(hba);
5480         ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5481                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5482                                       index, NULL);
5483         if (ret) {
5484                 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5485                          __func__, ret);
5486         } else {
5487                 hba->wb_buf_flush_enabled = false;
5488                 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5489         }
5490
5491         return ret;
5492 }
5493
5494 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5495                                                 u32 avail_buf)
5496 {
5497         u32 cur_buf;
5498         int ret;
5499         u8 index;
5500
5501         index = ufshcd_wb_get_query_index(hba);
5502         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5503                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5504                                               index, 0, &cur_buf);
5505         if (ret) {
5506                 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5507                         __func__, ret);
5508                 return false;
5509         }
5510
5511         if (!cur_buf) {
5512                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5513                          cur_buf);
5514                 return false;
5515         }
5516         /* Let it continue to flush when available buffer exceeds threshold */
5517         if (avail_buf < hba->vps->wb_flush_threshold)
5518                 return true;
5519
5520         return false;
5521 }
5522
5523 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5524 {
5525         int ret;
5526         u32 avail_buf;
5527         u8 index;
5528
5529         if (!ufshcd_is_wb_allowed(hba))
5530                 return false;
5531         /*
5532          * The ufs device needs the vcc to be ON to flush.
5533          * With user-space reduction enabled, it's enough to enable flush
5534          * by checking only the available buffer. The threshold
5535          * defined here is > 90% full.
5536          * With user-space preserved enabled, the current-buffer
5537          * should be checked too because the wb buffer size can reduce
5538          * when disk tends to be full. This info is provided by current
5539          * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5540          * keeping vcc on when current buffer is empty.
5541          */
5542         index = ufshcd_wb_get_query_index(hba);
5543         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5544                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5545                                       index, 0, &avail_buf);
5546         if (ret) {
5547                 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5548                          __func__, ret);
5549                 return false;
5550         }
5551
5552         if (!hba->dev_info.b_presrv_uspc_en) {
5553                 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5554                         return true;
5555                 return false;
5556         }
5557
5558         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5559 }
5560
5561 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5562 {
5563         struct ufs_hba *hba = container_of(to_delayed_work(work),
5564                                            struct ufs_hba,
5565                                            rpm_dev_flush_recheck_work);
5566         /*
5567          * To prevent unnecessary VCC power drain after device finishes
5568          * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5569          * after a certain delay to recheck the threshold by next runtime
5570          * suspend.
5571          */
5572         pm_runtime_get_sync(hba->dev);
5573         pm_runtime_put_sync(hba->dev);
5574 }
5575
5576 /**
5577  * ufshcd_exception_event_handler - handle exceptions raised by device
5578  * @work: pointer to work data
5579  *
5580  * Read bExceptionEventStatus attribute from the device and handle the
5581  * exception event accordingly.
5582  */
5583 static void ufshcd_exception_event_handler(struct work_struct *work)
5584 {
5585         struct ufs_hba *hba;
5586         int err;
5587         u32 status = 0;
5588         hba = container_of(work, struct ufs_hba, eeh_work);
5589
5590         pm_runtime_get_sync(hba->dev);
5591         ufshcd_scsi_block_requests(hba);
5592         err = ufshcd_get_ee_status(hba, &status);
5593         if (err) {
5594                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5595                                 __func__, err);
5596                 goto out;
5597         }
5598
5599         status &= hba->ee_ctrl_mask;
5600
5601         if (status & MASK_EE_URGENT_BKOPS)
5602                 ufshcd_bkops_exception_event_handler(hba);
5603
5604 out:
5605         ufshcd_scsi_unblock_requests(hba);
5606         /*
5607          * pm_runtime_get_noresume is called while scheduling
5608          * eeh_work to avoid suspend racing with exception work.
5609          * Hence decrement usage counter using pm_runtime_put_noidle
5610          * to allow suspend on completion of exception event handler.
5611          */
5612         pm_runtime_put_noidle(hba->dev);
5613         pm_runtime_put(hba->dev);
5614         return;
5615 }
5616
5617 /* Complete requests that have door-bell cleared */
5618 static void ufshcd_complete_requests(struct ufs_hba *hba)
5619 {
5620         ufshcd_transfer_req_compl(hba);
5621         ufshcd_tmc_handler(hba);
5622 }
5623
5624 /**
5625  * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5626  *                              to recover from the DL NAC errors or not.
5627  * @hba: per-adapter instance
5628  *
5629  * Returns true if error handling is required, false otherwise
5630  */
5631 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5632 {
5633         unsigned long flags;
5634         bool err_handling = true;
5635
5636         spin_lock_irqsave(hba->host->host_lock, flags);
5637         /*
5638          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
5639          * device fatal error and/or DL NAC & REPLAY timeout errors.
5640          */
5641         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5642                 goto out;
5643
5644         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5645             ((hba->saved_err & UIC_ERROR) &&
5646              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5647                 goto out;
5648
5649         if ((hba->saved_err & UIC_ERROR) &&
5650             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5651                 int err;
5652                 /*
5653                  * wait for 50ms to see if we can get any other errors or not.
5654                  */
5655                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5656                 msleep(50);
5657                 spin_lock_irqsave(hba->host->host_lock, flags);
5658
5659                 /*
5660                  * now check if we have got any other severe errors other than
5661                  * DL NAC error?
5662                  */
5663                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5664                     ((hba->saved_err & UIC_ERROR) &&
5665                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5666                         goto out;
5667
5668                 /*
5669                  * As DL NAC is the only error received so far, send out NOP
5670                  * command to confirm if link is still active or not.
5671                  *   - If we don't get any response then do error recovery.
5672                  *   - If we get response then clear the DL NAC error bit.
5673                  */
5674
5675                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5676                 err = ufshcd_verify_dev_init(hba);
5677                 spin_lock_irqsave(hba->host->host_lock, flags);
5678
5679                 if (err)
5680                         goto out;
5681
5682                 /* Link seems to be alive hence ignore the DL NAC errors */
5683                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5684                         hba->saved_err &= ~UIC_ERROR;
5685                 /* clear NAC error */
5686                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5687                 if (!hba->saved_uic_err)
5688                         err_handling = false;
5689         }
5690 out:
5691         spin_unlock_irqrestore(hba->host->host_lock, flags);
5692         return err_handling;
5693 }
5694
5695 /* host lock must be held before calling this func */
5696 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5697 {
5698         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5699                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5700 }
5701
5702 /* host lock must be held before calling this func */
5703 static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5704 {
5705         /* handle fatal errors only when link is not in error state */
5706         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5707                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5708                     ufshcd_is_saved_err_fatal(hba))
5709                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5710                 else
5711                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5712                 queue_work(hba->eh_wq, &hba->eh_work);
5713         }
5714 }
5715
5716 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5717 {
5718         pm_runtime_get_sync(hba->dev);
5719         if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
5720                 enum ufs_pm_op pm_op;
5721
5722                 /*
5723                  * Don't assume anything of pm_runtime_get_sync(), if
5724                  * resume fails, irq and clocks can be OFF, and powers
5725                  * can be OFF or in LPM.
5726                  */
5727                 ufshcd_setup_hba_vreg(hba, true);
5728                 ufshcd_enable_irq(hba);
5729                 ufshcd_setup_vreg(hba, true);
5730                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5731                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5732                 ufshcd_hold(hba, false);
5733                 if (!ufshcd_is_clkgating_allowed(hba))
5734                         ufshcd_setup_clocks(hba, true);
5735                 ufshcd_release(hba);
5736                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5737                 ufshcd_vops_resume(hba, pm_op);
5738         } else {
5739                 ufshcd_hold(hba, false);
5740                 if (hba->clk_scaling.is_allowed) {
5741                         cancel_work_sync(&hba->clk_scaling.suspend_work);
5742                         cancel_work_sync(&hba->clk_scaling.resume_work);
5743                         ufshcd_suspend_clkscaling(hba);
5744                 }
5745         }
5746 }
5747
5748 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5749 {
5750         ufshcd_release(hba);
5751         if (hba->clk_scaling.is_allowed)
5752                 ufshcd_resume_clkscaling(hba);
5753         pm_runtime_put(hba->dev);
5754 }
5755
5756 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5757 {
5758         return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5759                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5760                         ufshcd_is_link_broken(hba))));
5761 }
5762
5763 #ifdef CONFIG_PM
5764 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5765 {
5766         struct Scsi_Host *shost = hba->host;
5767         struct scsi_device *sdev;
5768         struct request_queue *q;
5769         int ret;
5770
5771         hba->is_sys_suspended = false;
5772         /*
5773          * Set RPM status of hba device to RPM_ACTIVE,
5774          * this also clears its runtime error.
5775          */
5776         ret = pm_runtime_set_active(hba->dev);
5777         /*
5778          * If hba device had runtime error, we also need to resume those
5779          * scsi devices under hba in case any of them has failed to be
5780          * resumed due to hba runtime resume failure. This is to unblock
5781          * blk_queue_enter in case there are bios waiting inside it.
5782          */
5783         if (!ret) {
5784                 shost_for_each_device(sdev, shost) {
5785                         q = sdev->request_queue;
5786                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
5787                                        q->rpm_status == RPM_SUSPENDING))
5788                                 pm_request_resume(q->dev);
5789                 }
5790         }
5791 }
5792 #else
5793 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
5794 {
5795 }
5796 #endif
5797
5798 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
5799 {
5800         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
5801         u32 mode;
5802
5803         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
5804
5805         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
5806                 return true;
5807
5808         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
5809                 return true;
5810
5811         return false;
5812 }
5813
5814 /**
5815  * ufshcd_err_handler - handle UFS errors that require s/w attention
5816  * @work: pointer to work structure
5817  */
5818 static void ufshcd_err_handler(struct work_struct *work)
5819 {
5820         struct ufs_hba *hba;
5821         unsigned long flags;
5822         bool err_xfer = false;
5823         bool err_tm = false;
5824         int err = 0, pmc_err;
5825         int tag;
5826         bool needs_reset = false, needs_restore = false;
5827
5828         hba = container_of(work, struct ufs_hba, eh_work);
5829
5830         down(&hba->eh_sem);
5831         spin_lock_irqsave(hba->host->host_lock, flags);
5832         if (ufshcd_err_handling_should_stop(hba)) {
5833                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5834                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5835                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5836                 up(&hba->eh_sem);
5837                 return;
5838         }
5839         ufshcd_set_eh_in_progress(hba);
5840         spin_unlock_irqrestore(hba->host->host_lock, flags);
5841         ufshcd_err_handling_prepare(hba);
5842         spin_lock_irqsave(hba->host->host_lock, flags);
5843         ufshcd_scsi_block_requests(hba);
5844         hba->ufshcd_state = UFSHCD_STATE_RESET;
5845
5846         /* Complete requests that have door-bell cleared by h/w */
5847         ufshcd_complete_requests(hba);
5848
5849         /*
5850          * A full reset and restore might have happened after preparation
5851          * is finished, double check whether we should stop.
5852          */
5853         if (ufshcd_err_handling_should_stop(hba))
5854                 goto skip_err_handling;
5855
5856         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5857                 bool ret;
5858
5859                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5860                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5861                 ret = ufshcd_quirk_dl_nac_errors(hba);
5862                 spin_lock_irqsave(hba->host->host_lock, flags);
5863                 if (!ret && ufshcd_err_handling_should_stop(hba))
5864                         goto skip_err_handling;
5865         }
5866
5867         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
5868             (hba->saved_uic_err &&
5869              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
5870                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
5871
5872                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5873                 ufshcd_print_host_state(hba);
5874                 ufshcd_print_pwr_info(hba);
5875                 ufshcd_print_evt_hist(hba);
5876                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5877                 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
5878                 spin_lock_irqsave(hba->host->host_lock, flags);
5879         }
5880
5881         /*
5882          * if host reset is required then skip clearing the pending
5883          * transfers forcefully because they will get cleared during
5884          * host reset and restore
5885          */
5886         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5887             ufshcd_is_saved_err_fatal(hba) ||
5888             ((hba->saved_err & UIC_ERROR) &&
5889              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5890                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
5891                 needs_reset = true;
5892                 goto do_reset;
5893         }
5894
5895         /*
5896          * If LINERESET was caught, UFS might have been put to PWM mode,
5897          * check if power mode restore is needed.
5898          */
5899         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
5900                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5901                 if (!hba->saved_uic_err)
5902                         hba->saved_err &= ~UIC_ERROR;
5903                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5904                 if (ufshcd_is_pwr_mode_restore_needed(hba))
5905                         needs_restore = true;
5906                 spin_lock_irqsave(hba->host->host_lock, flags);
5907                 if (!hba->saved_err && !needs_restore)
5908                         goto skip_err_handling;
5909         }
5910
5911         hba->silence_err_logs = true;
5912         /* release lock as clear command might sleep */
5913         spin_unlock_irqrestore(hba->host->host_lock, flags);
5914         /* Clear pending transfer requests */
5915         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5916                 if (ufshcd_try_to_abort_task(hba, tag)) {
5917                         err_xfer = true;
5918                         goto lock_skip_pending_xfer_clear;
5919                 }
5920         }
5921
5922         /* Clear pending task management requests */
5923         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5924                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5925                         err_tm = true;
5926                         goto lock_skip_pending_xfer_clear;
5927                 }
5928         }
5929
5930 lock_skip_pending_xfer_clear:
5931         spin_lock_irqsave(hba->host->host_lock, flags);
5932
5933         /* Complete the requests that are cleared by s/w */
5934         ufshcd_complete_requests(hba);
5935         hba->silence_err_logs = false;
5936
5937         if (err_xfer || err_tm) {
5938                 needs_reset = true;
5939                 goto do_reset;
5940         }
5941
5942         /*
5943          * After all reqs and tasks are cleared from doorbell,
5944          * now it is safe to retore power mode.
5945          */
5946         if (needs_restore) {
5947                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5948                 /*
5949                  * Hold the scaling lock just in case dev cmds
5950                  * are sent via bsg and/or sysfs.
5951                  */
5952                 down_write(&hba->clk_scaling_lock);
5953                 hba->force_pmc = true;
5954                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
5955                 if (pmc_err) {
5956                         needs_reset = true;
5957                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
5958                                         __func__, pmc_err);
5959                 }
5960                 hba->force_pmc = false;
5961                 ufshcd_print_pwr_info(hba);
5962                 up_write(&hba->clk_scaling_lock);
5963                 spin_lock_irqsave(hba->host->host_lock, flags);
5964         }
5965
5966 do_reset:
5967         /* Fatal errors need reset */
5968         if (needs_reset) {
5969                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5970
5971                 /*
5972                  * ufshcd_reset_and_restore() does the link reinitialization
5973                  * which will need atleast one empty doorbell slot to send the
5974                  * device management commands (NOP and query commands).
5975                  * If there is no slot empty at this moment then free up last
5976                  * slot forcefully.
5977                  */
5978                 if (hba->outstanding_reqs == max_doorbells)
5979                         __ufshcd_transfer_req_compl(hba,
5980                                                     (1UL << (hba->nutrs - 1)));
5981
5982                 hba->force_reset = false;
5983                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5984                 err = ufshcd_reset_and_restore(hba);
5985                 if (err)
5986                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
5987                                         __func__, err);
5988                 else
5989                         ufshcd_recover_pm_error(hba);
5990                 spin_lock_irqsave(hba->host->host_lock, flags);
5991         }
5992
5993 skip_err_handling:
5994         if (!needs_reset) {
5995                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5996                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5997                 if (hba->saved_err || hba->saved_uic_err)
5998                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5999                             __func__, hba->saved_err, hba->saved_uic_err);
6000         }
6001         ufshcd_clear_eh_in_progress(hba);
6002         spin_unlock_irqrestore(hba->host->host_lock, flags);
6003         ufshcd_scsi_unblock_requests(hba);
6004         ufshcd_err_handling_unprepare(hba);
6005         up(&hba->eh_sem);
6006 }
6007
6008 /**
6009  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6010  * @hba: per-adapter instance
6011  *
6012  * Returns
6013  *  IRQ_HANDLED - If interrupt is valid
6014  *  IRQ_NONE    - If invalid interrupt
6015  */
6016 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6017 {
6018         u32 reg;
6019         irqreturn_t retval = IRQ_NONE;
6020
6021         /* PHY layer error */
6022         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6023         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6024             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6025                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6026                 /*
6027                  * To know whether this error is fatal or not, DB timeout
6028                  * must be checked but this error is handled separately.
6029                  */
6030                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6031                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6032                                         __func__);
6033
6034                 /* Got a LINERESET indication. */
6035                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6036                         struct uic_command *cmd = NULL;
6037
6038                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6039                         if (hba->uic_async_done && hba->active_uic_cmd)
6040                                 cmd = hba->active_uic_cmd;
6041                         /*
6042                          * Ignore the LINERESET during power mode change
6043                          * operation via DME_SET command.
6044                          */
6045                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
6046                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6047                 }
6048                 retval |= IRQ_HANDLED;
6049         }
6050
6051         /* PA_INIT_ERROR is fatal and needs UIC reset */
6052         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6053         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6054             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6055                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6056
6057                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6058                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6059                 else if (hba->dev_quirks &
6060                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6061                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6062                                 hba->uic_error |=
6063                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6064                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6065                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6066                 }
6067                 retval |= IRQ_HANDLED;
6068         }
6069
6070         /* UIC NL/TL/DME errors needs software retry */
6071         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6072         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6073             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6074                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6075                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6076                 retval |= IRQ_HANDLED;
6077         }
6078
6079         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6080         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6081             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6082                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6083                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6084                 retval |= IRQ_HANDLED;
6085         }
6086
6087         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6088         if ((reg & UIC_DME_ERROR) &&
6089             (reg & UIC_DME_ERROR_CODE_MASK)) {
6090                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6091                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6092                 retval |= IRQ_HANDLED;
6093         }
6094
6095         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6096                         __func__, hba->uic_error);
6097         return retval;
6098 }
6099
6100 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
6101                                          u32 intr_mask)
6102 {
6103         if (!ufshcd_is_auto_hibern8_supported(hba) ||
6104             !ufshcd_is_auto_hibern8_enabled(hba))
6105                 return false;
6106
6107         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
6108                 return false;
6109
6110         if (hba->active_uic_cmd &&
6111             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
6112             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
6113                 return false;
6114
6115         return true;
6116 }
6117
6118 /**
6119  * ufshcd_check_errors - Check for errors that need s/w attention
6120  * @hba: per-adapter instance
6121  *
6122  * Returns
6123  *  IRQ_HANDLED - If interrupt is valid
6124  *  IRQ_NONE    - If invalid interrupt
6125  */
6126 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6127 {
6128         bool queue_eh_work = false;
6129         irqreturn_t retval = IRQ_NONE;
6130
6131         if (hba->errors & INT_FATAL_ERRORS) {
6132                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6133                                        hba->errors);
6134                 queue_eh_work = true;
6135         }
6136
6137         if (hba->errors & UIC_ERROR) {
6138                 hba->uic_error = 0;
6139                 retval = ufshcd_update_uic_error(hba);
6140                 if (hba->uic_error)
6141                         queue_eh_work = true;
6142         }
6143
6144         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6145                 dev_err(hba->dev,
6146                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6147                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6148                         "Enter" : "Exit",
6149                         hba->errors, ufshcd_get_upmcrs(hba));
6150                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6151                                        hba->errors);
6152                 ufshcd_set_link_broken(hba);
6153                 queue_eh_work = true;
6154         }
6155
6156         if (queue_eh_work) {
6157                 /*
6158                  * update the transfer error masks to sticky bits, let's do this
6159                  * irrespective of current ufshcd_state.
6160                  */
6161                 hba->saved_err |= hba->errors;
6162                 hba->saved_uic_err |= hba->uic_error;
6163
6164                 /* dump controller state before resetting */
6165                 if ((hba->saved_err &
6166                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6167                     (hba->saved_uic_err &&
6168                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6169                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6170                                         __func__, hba->saved_err,
6171                                         hba->saved_uic_err);
6172                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6173                                          "host_regs: ");
6174                         ufshcd_print_pwr_info(hba);
6175                 }
6176                 ufshcd_schedule_eh_work(hba);
6177                 retval |= IRQ_HANDLED;
6178         }
6179         /*
6180          * if (!queue_eh_work) -
6181          * Other errors are either non-fatal where host recovers
6182          * itself without s/w intervention or errors that will be
6183          * handled by the SCSI core layer.
6184          */
6185         return retval;
6186 }
6187
6188 struct ctm_info {
6189         struct ufs_hba  *hba;
6190         unsigned long   pending;
6191         unsigned int    ncpl;
6192 };
6193
6194 static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
6195 {
6196         struct ctm_info *const ci = priv;
6197         struct completion *c;
6198
6199         WARN_ON_ONCE(reserved);
6200         if (test_bit(req->tag, &ci->pending))
6201                 return true;
6202         ci->ncpl++;
6203         c = req->end_io_data;
6204         if (c)
6205                 complete(c);
6206         return true;
6207 }
6208
6209 /**
6210  * ufshcd_tmc_handler - handle task management function completion
6211  * @hba: per adapter instance
6212  *
6213  * Returns
6214  *  IRQ_HANDLED - If interrupt is valid
6215  *  IRQ_NONE    - If invalid interrupt
6216  */
6217 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6218 {
6219         struct request_queue *q = hba->tmf_queue;
6220         struct ctm_info ci = {
6221                 .hba     = hba,
6222                 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
6223         };
6224
6225         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
6226         return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
6227 }
6228
6229 /**
6230  * ufshcd_sl_intr - Interrupt service routine
6231  * @hba: per adapter instance
6232  * @intr_status: contains interrupts generated by the controller
6233  *
6234  * Returns
6235  *  IRQ_HANDLED - If interrupt is valid
6236  *  IRQ_NONE    - If invalid interrupt
6237  */
6238 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6239 {
6240         irqreturn_t retval = IRQ_NONE;
6241
6242         hba->errors = UFSHCD_ERROR_MASK & intr_status;
6243
6244         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
6245                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
6246
6247         if (hba->errors)
6248                 retval |= ufshcd_check_errors(hba);
6249
6250         if (intr_status & UFSHCD_UIC_MASK)
6251                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6252
6253         if (intr_status & UTP_TASK_REQ_COMPL)
6254                 retval |= ufshcd_tmc_handler(hba);
6255
6256         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6257                 retval |= ufshcd_transfer_req_compl(hba);
6258
6259         return retval;
6260 }
6261
6262 /**
6263  * ufshcd_intr - Main interrupt service routine
6264  * @irq: irq number
6265  * @__hba: pointer to adapter instance
6266  *
6267  * Returns
6268  *  IRQ_HANDLED - If interrupt is valid
6269  *  IRQ_NONE    - If invalid interrupt
6270  */
6271 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6272 {
6273         u32 intr_status, enabled_intr_status = 0;
6274         irqreturn_t retval = IRQ_NONE;
6275         struct ufs_hba *hba = __hba;
6276         int retries = hba->nutrs;
6277
6278         spin_lock(hba->host->host_lock);
6279         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6280         hba->ufs_stats.last_intr_status = intr_status;
6281         hba->ufs_stats.last_intr_ts = ktime_get();
6282
6283         /*
6284          * There could be max of hba->nutrs reqs in flight and in worst case
6285          * if the reqs get finished 1 by 1 after the interrupt status is
6286          * read, make sure we handle them by checking the interrupt status
6287          * again in a loop until we process all of the reqs before returning.
6288          */
6289         while (intr_status && retries--) {
6290                 enabled_intr_status =
6291                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6292                 if (intr_status)
6293                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6294                 if (enabled_intr_status)
6295                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6296
6297                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6298         }
6299
6300         if (enabled_intr_status && retval == IRQ_NONE) {
6301                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6302                                         __func__, intr_status);
6303                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6304         }
6305
6306         spin_unlock(hba->host->host_lock);
6307         return retval;
6308 }
6309
6310 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6311 {
6312         int err = 0;
6313         u32 mask = 1 << tag;
6314         unsigned long flags;
6315
6316         if (!test_bit(tag, &hba->outstanding_tasks))
6317                 goto out;
6318
6319         spin_lock_irqsave(hba->host->host_lock, flags);
6320         ufshcd_utmrl_clear(hba, tag);
6321         spin_unlock_irqrestore(hba->host->host_lock, flags);
6322
6323         /* poll for max. 1 sec to clear door bell register by h/w */
6324         err = ufshcd_wait_for_register(hba,
6325                         REG_UTP_TASK_REQ_DOOR_BELL,
6326                         mask, 0, 1000, 1000);
6327 out:
6328         return err;
6329 }
6330
6331 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6332                 struct utp_task_req_desc *treq, u8 tm_function)
6333 {
6334         struct request_queue *q = hba->tmf_queue;
6335         struct Scsi_Host *host = hba->host;
6336         DECLARE_COMPLETION_ONSTACK(wait);
6337         struct request *req;
6338         unsigned long flags;
6339         int free_slot, task_tag, err;
6340
6341         /*
6342          * Get free slot, sleep if slots are unavailable.
6343          * Even though we use wait_event() which sleeps indefinitely,
6344          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6345          */
6346         req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
6347         req->end_io_data = &wait;
6348         free_slot = req->tag;
6349         WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
6350         ufshcd_hold(hba, false);
6351
6352         spin_lock_irqsave(host->host_lock, flags);
6353         task_tag = hba->nutrs + free_slot;
6354
6355         treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6356
6357         memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
6358         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
6359
6360         /* send command to the controller */
6361         __set_bit(free_slot, &hba->outstanding_tasks);
6362
6363         /* Make sure descriptors are ready before ringing the task doorbell */
6364         wmb();
6365
6366         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
6367         /* Make sure that doorbell is committed immediately */
6368         wmb();
6369
6370         spin_unlock_irqrestore(host->host_lock, flags);
6371
6372         ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6373
6374         /* wait until the task management command is completed */
6375         err = wait_for_completion_io_timeout(&wait,
6376                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6377         if (!err) {
6378                 /*
6379                  * Make sure that ufshcd_compl_tm() does not trigger a
6380                  * use-after-free.
6381                  */
6382                 req->end_io_data = NULL;
6383                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6384                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6385                                 __func__, tm_function);
6386                 if (ufshcd_clear_tm_cmd(hba, free_slot))
6387                         dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
6388                                         __func__, free_slot);
6389                 err = -ETIMEDOUT;
6390         } else {
6391                 err = 0;
6392                 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
6393
6394                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6395         }
6396
6397         spin_lock_irqsave(hba->host->host_lock, flags);
6398         __clear_bit(free_slot, &hba->outstanding_tasks);
6399         spin_unlock_irqrestore(hba->host->host_lock, flags);
6400
6401         blk_put_request(req);
6402
6403         ufshcd_release(hba);
6404         return err;
6405 }
6406
6407 /**
6408  * ufshcd_issue_tm_cmd - issues task management commands to controller
6409  * @hba: per adapter instance
6410  * @lun_id: LUN ID to which TM command is sent
6411  * @task_id: task ID to which the TM command is applicable
6412  * @tm_function: task management function opcode
6413  * @tm_response: task management service response return value
6414  *
6415  * Returns non-zero value on error, zero on success.
6416  */
6417 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6418                 u8 tm_function, u8 *tm_response)
6419 {
6420         struct utp_task_req_desc treq = { { 0 }, };
6421         int ocs_value, err;
6422
6423         /* Configure task request descriptor */
6424         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6425         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6426
6427         /* Configure task request UPIU */
6428         treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6429                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6430         treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6431
6432         /*
6433          * The host shall provide the same value for LUN field in the basic
6434          * header and for Input Parameter.
6435          */
6436         treq.input_param1 = cpu_to_be32(lun_id);
6437         treq.input_param2 = cpu_to_be32(task_id);
6438
6439         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6440         if (err == -ETIMEDOUT)
6441                 return err;
6442
6443         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6444         if (ocs_value != OCS_SUCCESS)
6445                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6446                                 __func__, ocs_value);
6447         else if (tm_response)
6448                 *tm_response = be32_to_cpu(treq.output_param1) &
6449                                 MASK_TM_SERVICE_RESP;
6450         return err;
6451 }
6452
6453 /**
6454  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6455  * @hba:        per-adapter instance
6456  * @req_upiu:   upiu request
6457  * @rsp_upiu:   upiu reply
6458  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6459  * @buff_len:   descriptor size, 0 if NA
6460  * @cmd_type:   specifies the type (NOP, Query...)
6461  * @desc_op:    descriptor operation
6462  *
6463  * Those type of requests uses UTP Transfer Request Descriptor - utrd.
6464  * Therefore, it "rides" the device management infrastructure: uses its tag and
6465  * tasks work queues.
6466  *
6467  * Since there is only one available tag for device management commands,
6468  * the caller is expected to hold the hba->dev_cmd.lock mutex.
6469  */
6470 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6471                                         struct utp_upiu_req *req_upiu,
6472                                         struct utp_upiu_req *rsp_upiu,
6473                                         u8 *desc_buff, int *buff_len,
6474                                         enum dev_cmd_type cmd_type,
6475                                         enum query_opcode desc_op)
6476 {
6477         struct request_queue *q = hba->cmd_queue;
6478         struct request *req;
6479         struct ufshcd_lrb *lrbp;
6480         int err = 0;
6481         int tag;
6482         struct completion wait;
6483         unsigned long flags;
6484         u8 upiu_flags;
6485
6486         down_read(&hba->clk_scaling_lock);
6487
6488         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6489         if (IS_ERR(req)) {
6490                 err = PTR_ERR(req);
6491                 goto out_unlock;
6492         }
6493         tag = req->tag;
6494         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6495
6496         init_completion(&wait);
6497         lrbp = &hba->lrb[tag];
6498         if (unlikely(lrbp->in_use)) {
6499                 err = -EBUSY;
6500                 goto out;
6501         }
6502
6503         WARN_ON(lrbp->cmd);
6504         lrbp->cmd = NULL;
6505         lrbp->sense_bufflen = 0;
6506         lrbp->sense_buffer = NULL;
6507         lrbp->task_tag = tag;
6508         lrbp->lun = 0;
6509         lrbp->intr_cmd = true;
6510         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6511         hba->dev_cmd.type = cmd_type;
6512
6513         switch (hba->ufs_version) {
6514         case UFSHCI_VERSION_10:
6515         case UFSHCI_VERSION_11:
6516                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6517                 break;
6518         default:
6519                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6520                 break;
6521         }
6522
6523         /* update the task tag in the request upiu */
6524         req_upiu->header.dword_0 |= cpu_to_be32(tag);
6525
6526         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6527
6528         /* just copy the upiu request as it is */
6529         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6530         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6531                 /* The Data Segment Area is optional depending upon the query
6532                  * function value. for WRITE DESCRIPTOR, the data segment
6533                  * follows right after the tsf.
6534                  */
6535                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6536                 *buff_len = 0;
6537         }
6538
6539         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6540
6541         hba->dev_cmd.complete = &wait;
6542
6543         /* Make sure descriptors are ready before ringing the doorbell */
6544         wmb();
6545         spin_lock_irqsave(hba->host->host_lock, flags);
6546         ufshcd_send_command(hba, tag);
6547         spin_unlock_irqrestore(hba->host->host_lock, flags);
6548
6549         /*
6550          * ignore the returning value here - ufshcd_check_query_response is
6551          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6552          * read the response directly ignoring all errors.
6553          */
6554         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6555
6556         /* just copy the upiu response as it is */
6557         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6558         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6559                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6560                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6561                                MASK_QUERY_DATA_SEG_LEN;
6562
6563                 if (*buff_len >= resp_len) {
6564                         memcpy(desc_buff, descp, resp_len);
6565                         *buff_len = resp_len;
6566                 } else {
6567                         dev_warn(hba->dev,
6568                                  "%s: rsp size %d is bigger than buffer size %d",
6569                                  __func__, resp_len, *buff_len);
6570                         *buff_len = 0;
6571                         err = -EINVAL;
6572                 }
6573         }
6574
6575 out:
6576         blk_put_request(req);
6577 out_unlock:
6578         up_read(&hba->clk_scaling_lock);
6579         return err;
6580 }
6581
6582 /**
6583  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6584  * @hba:        per-adapter instance
6585  * @req_upiu:   upiu request
6586  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
6587  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
6588  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6589  * @buff_len:   descriptor size, 0 if NA
6590  * @desc_op:    descriptor operation
6591  *
6592  * Supports UTP Transfer requests (nop and query), and UTP Task
6593  * Management requests.
6594  * It is up to the caller to fill the upiu conent properly, as it will
6595  * be copied without any further input validations.
6596  */
6597 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6598                              struct utp_upiu_req *req_upiu,
6599                              struct utp_upiu_req *rsp_upiu,
6600                              int msgcode,
6601                              u8 *desc_buff, int *buff_len,
6602                              enum query_opcode desc_op)
6603 {
6604         int err;
6605         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6606         struct utp_task_req_desc treq = { { 0 }, };
6607         int ocs_value;
6608         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6609
6610         switch (msgcode) {
6611         case UPIU_TRANSACTION_NOP_OUT:
6612                 cmd_type = DEV_CMD_TYPE_NOP;
6613                 fallthrough;
6614         case UPIU_TRANSACTION_QUERY_REQ:
6615                 ufshcd_hold(hba, false);
6616                 mutex_lock(&hba->dev_cmd.lock);
6617                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6618                                                    desc_buff, buff_len,
6619                                                    cmd_type, desc_op);
6620                 mutex_unlock(&hba->dev_cmd.lock);
6621                 ufshcd_release(hba);
6622
6623                 break;
6624         case UPIU_TRANSACTION_TASK_REQ:
6625                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6626                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6627
6628                 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6629
6630                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6631                 if (err == -ETIMEDOUT)
6632                         break;
6633
6634                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6635                 if (ocs_value != OCS_SUCCESS) {
6636                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6637                                 ocs_value);
6638                         break;
6639                 }
6640
6641                 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6642
6643                 break;
6644         default:
6645                 err = -EINVAL;
6646
6647                 break;
6648         }
6649
6650         return err;
6651 }
6652
6653 /**
6654  * ufshcd_eh_device_reset_handler - device reset handler registered to
6655  *                                    scsi layer.
6656  * @cmd: SCSI command pointer
6657  *
6658  * Returns SUCCESS/FAILED
6659  */
6660 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6661 {
6662         struct Scsi_Host *host;
6663         struct ufs_hba *hba;
6664         unsigned int tag;
6665         u32 pos;
6666         int err;
6667         u8 resp = 0xF;
6668         struct ufshcd_lrb *lrbp;
6669         unsigned long flags;
6670
6671         host = cmd->device->host;
6672         hba = shost_priv(host);
6673         tag = cmd->request->tag;
6674
6675         lrbp = &hba->lrb[tag];
6676         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6677         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6678                 if (!err)
6679                         err = resp;
6680                 goto out;
6681         }
6682
6683         /* clear the commands that were pending for corresponding LUN */
6684         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6685                 if (hba->lrb[pos].lun == lrbp->lun) {
6686                         err = ufshcd_clear_cmd(hba, pos);
6687                         if (err)
6688                                 break;
6689                 }
6690         }
6691         spin_lock_irqsave(host->host_lock, flags);
6692         ufshcd_transfer_req_compl(hba);
6693         spin_unlock_irqrestore(host->host_lock, flags);
6694
6695 out:
6696         hba->req_abort_count = 0;
6697         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
6698         if (!err) {
6699                 err = SUCCESS;
6700         } else {
6701                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6702                 err = FAILED;
6703         }
6704         return err;
6705 }
6706
6707 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6708 {
6709         struct ufshcd_lrb *lrbp;
6710         int tag;
6711
6712         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6713                 lrbp = &hba->lrb[tag];
6714                 lrbp->req_abort_skip = true;
6715         }
6716 }
6717
6718 /**
6719  * ufshcd_try_to_abort_task - abort a specific task
6720  * @hba: Pointer to adapter instance
6721  * @tag: Task tag/index to be aborted
6722  *
6723  * Abort the pending command in device by sending UFS_ABORT_TASK task management
6724  * command, and in host controller by clearing the door-bell register. There can
6725  * be race between controller sending the command to the device while abort is
6726  * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
6727  * really issued and then try to abort it.
6728  *
6729  * Returns zero on success, non-zero on failure
6730  */
6731 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6732 {
6733         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6734         int err = 0;
6735         int poll_cnt;
6736         u8 resp = 0xF;
6737         u32 reg;
6738
6739         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6740                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6741                                 UFS_QUERY_TASK, &resp);
6742                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6743                         /* cmd pending in the device */
6744                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6745                                 __func__, tag);
6746                         break;
6747                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6748                         /*
6749                          * cmd not pending in the device, check if it is
6750                          * in transition.
6751                          */
6752                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6753                                 __func__, tag);
6754                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6755                         if (reg & (1 << tag)) {
6756                                 /* sleep for max. 200us to stabilize */
6757                                 usleep_range(100, 200);
6758                                 continue;
6759                         }
6760                         /* command completed already */
6761                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6762                                 __func__, tag);
6763                         goto out;
6764                 } else {
6765                         dev_err(hba->dev,
6766                                 "%s: no response from device. tag = %d, err %d\n",
6767                                 __func__, tag, err);
6768                         if (!err)
6769                                 err = resp; /* service response error */
6770                         goto out;
6771                 }
6772         }
6773
6774         if (!poll_cnt) {
6775                 err = -EBUSY;
6776                 goto out;
6777         }
6778
6779         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6780                         UFS_ABORT_TASK, &resp);
6781         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6782                 if (!err) {
6783                         err = resp; /* service response error */
6784                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6785                                 __func__, tag, err);
6786                 }
6787                 goto out;
6788         }
6789
6790         err = ufshcd_clear_cmd(hba, tag);
6791         if (err)
6792                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6793                         __func__, tag, err);
6794
6795 out:
6796         return err;
6797 }
6798
6799 /**
6800  * ufshcd_abort - scsi host template eh_abort_handler callback
6801  * @cmd: SCSI command pointer
6802  *
6803  * Returns SUCCESS/FAILED
6804  */
6805 static int ufshcd_abort(struct scsi_cmnd *cmd)
6806 {
6807         struct Scsi_Host *host;
6808         struct ufs_hba *hba;
6809         unsigned long flags;
6810         unsigned int tag;
6811         int err = 0;
6812         struct ufshcd_lrb *lrbp;
6813         u32 reg;
6814
6815         host = cmd->device->host;
6816         hba = shost_priv(host);
6817         tag = cmd->request->tag;
6818         lrbp = &hba->lrb[tag];
6819         if (!ufshcd_valid_tag(hba, tag)) {
6820                 dev_err(hba->dev,
6821                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6822                         __func__, tag, cmd, cmd->request);
6823                 BUG();
6824         }
6825
6826         ufshcd_hold(hba, false);
6827         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6828         /* If command is already aborted/completed, return SUCCESS */
6829         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6830                 dev_err(hba->dev,
6831                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6832                         __func__, tag, hba->outstanding_reqs, reg);
6833                 goto out;
6834         }
6835
6836         /* Print Transfer Request of aborted task */
6837         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6838
6839         /*
6840          * Print detailed info about aborted request.
6841          * As more than one request might get aborted at the same time,
6842          * print full information only for the first aborted request in order
6843          * to reduce repeated printouts. For other aborted requests only print
6844          * basic details.
6845          */
6846         scsi_print_command(cmd);
6847         if (!hba->req_abort_count) {
6848                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
6849                 ufshcd_print_evt_hist(hba);
6850                 ufshcd_print_host_state(hba);
6851                 ufshcd_print_pwr_info(hba);
6852                 ufshcd_print_trs(hba, 1 << tag, true);
6853         } else {
6854                 ufshcd_print_trs(hba, 1 << tag, false);
6855         }
6856         hba->req_abort_count++;
6857
6858         if (!(reg & (1 << tag))) {
6859                 dev_err(hba->dev,
6860                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6861                 __func__, tag);
6862                 goto cleanup;
6863         }
6864
6865         /*
6866          * Task abort to the device W-LUN is illegal. When this command
6867          * will fail, due to spec violation, scsi err handling next step
6868          * will be to send LU reset which, again, is a spec violation.
6869          * To avoid these unnecessary/illegal steps, first we clean up
6870          * the lrb taken by this cmd and mark the lrb as in_use, then
6871          * queue the eh_work and bail.
6872          */
6873         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
6874                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
6875                 spin_lock_irqsave(host->host_lock, flags);
6876                 if (lrbp->cmd) {
6877                         __ufshcd_transfer_req_compl(hba, (1UL << tag));
6878                         __set_bit(tag, &hba->outstanding_reqs);
6879                         lrbp->in_use = true;
6880                         hba->force_reset = true;
6881                         ufshcd_schedule_eh_work(hba);
6882                 }
6883
6884                 spin_unlock_irqrestore(host->host_lock, flags);
6885                 goto out;
6886         }
6887
6888         /* Skip task abort in case previous aborts failed and report failure */
6889         if (lrbp->req_abort_skip)
6890                 err = -EIO;
6891         else
6892                 err = ufshcd_try_to_abort_task(hba, tag);
6893
6894         if (!err) {
6895 cleanup:
6896                 spin_lock_irqsave(host->host_lock, flags);
6897                 __ufshcd_transfer_req_compl(hba, (1UL << tag));
6898                 spin_unlock_irqrestore(host->host_lock, flags);
6899 out:
6900                 err = SUCCESS;
6901         } else {
6902                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6903                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6904                 err = FAILED;
6905         }
6906
6907         /*
6908          * This ufshcd_release() corresponds to the original scsi cmd that got
6909          * aborted here (as we won't get any IRQ for it).
6910          */
6911         ufshcd_release(hba);
6912         return err;
6913 }
6914
6915 /**
6916  * ufshcd_host_reset_and_restore - reset and restore host controller
6917  * @hba: per-adapter instance
6918  *
6919  * Note that host controller reset may issue DME_RESET to
6920  * local and remote (device) Uni-Pro stack and the attributes
6921  * are reset to default state.
6922  *
6923  * Returns zero on success, non-zero on failure
6924  */
6925 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6926 {
6927         int err;
6928         unsigned long flags;
6929
6930         /*
6931          * Stop the host controller and complete the requests
6932          * cleared by h/w
6933          */
6934         ufshcd_hba_stop(hba);
6935
6936         spin_lock_irqsave(hba->host->host_lock, flags);
6937         hba->silence_err_logs = true;
6938         ufshcd_complete_requests(hba);
6939         hba->silence_err_logs = false;
6940         spin_unlock_irqrestore(hba->host->host_lock, flags);
6941
6942         /* scale up clocks to max frequency before full reinitialization */
6943         ufshcd_set_clk_freq(hba, true);
6944
6945         err = ufshcd_hba_enable(hba);
6946         if (err)
6947                 goto out;
6948
6949         /* Establish the link again and restore the device */
6950         err = ufshcd_probe_hba(hba, false);
6951         if (!err)
6952                 ufshcd_clear_ua_wluns(hba);
6953 out:
6954         if (err)
6955                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6956         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
6957         return err;
6958 }
6959
6960 /**
6961  * ufshcd_reset_and_restore - reset and re-initialize host/device
6962  * @hba: per-adapter instance
6963  *
6964  * Reset and recover device, host and re-establish link. This
6965  * is helpful to recover the communication in fatal error conditions.
6966  *
6967  * Returns zero on success, non-zero on failure
6968  */
6969 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6970 {
6971         u32 saved_err;
6972         u32 saved_uic_err;
6973         int err = 0;
6974         unsigned long flags;
6975         int retries = MAX_HOST_RESET_RETRIES;
6976
6977         /*
6978          * This is a fresh start, cache and clear saved error first,
6979          * in case new error generated during reset and restore.
6980          */
6981         spin_lock_irqsave(hba->host->host_lock, flags);
6982         saved_err = hba->saved_err;
6983         saved_uic_err = hba->saved_uic_err;
6984         hba->saved_err = 0;
6985         hba->saved_uic_err = 0;
6986         spin_unlock_irqrestore(hba->host->host_lock, flags);
6987
6988         do {
6989                 /* Reset the attached device */
6990                 ufshcd_device_reset(hba);
6991
6992                 err = ufshcd_host_reset_and_restore(hba);
6993         } while (err && --retries);
6994
6995         spin_lock_irqsave(hba->host->host_lock, flags);
6996         /*
6997          * Inform scsi mid-layer that we did reset and allow to handle
6998          * Unit Attention properly.
6999          */
7000         scsi_report_bus_reset(hba->host, 0);
7001         if (err) {
7002                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7003                 hba->saved_err |= saved_err;
7004                 hba->saved_uic_err |= saved_uic_err;
7005         }
7006         spin_unlock_irqrestore(hba->host->host_lock, flags);
7007
7008         return err;
7009 }
7010
7011 /**
7012  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7013  * @cmd: SCSI command pointer
7014  *
7015  * Returns SUCCESS/FAILED
7016  */
7017 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7018 {
7019         int err = SUCCESS;
7020         unsigned long flags;
7021         struct ufs_hba *hba;
7022
7023         hba = shost_priv(cmd->device->host);
7024
7025         spin_lock_irqsave(hba->host->host_lock, flags);
7026         hba->force_reset = true;
7027         ufshcd_schedule_eh_work(hba);
7028         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7029         spin_unlock_irqrestore(hba->host->host_lock, flags);
7030
7031         flush_work(&hba->eh_work);
7032
7033         spin_lock_irqsave(hba->host->host_lock, flags);
7034         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7035                 err = FAILED;
7036         spin_unlock_irqrestore(hba->host->host_lock, flags);
7037
7038         return err;
7039 }
7040
7041 /**
7042  * ufshcd_get_max_icc_level - calculate the ICC level
7043  * @sup_curr_uA: max. current supported by the regulator
7044  * @start_scan: row at the desc table to start scan from
7045  * @buff: power descriptor buffer
7046  *
7047  * Returns calculated max ICC level for specific regulator
7048  */
7049 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7050 {
7051         int i;
7052         int curr_uA;
7053         u16 data;
7054         u16 unit;
7055
7056         for (i = start_scan; i >= 0; i--) {
7057                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
7058                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7059                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7060                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7061                 switch (unit) {
7062                 case UFSHCD_NANO_AMP:
7063                         curr_uA = curr_uA / 1000;
7064                         break;
7065                 case UFSHCD_MILI_AMP:
7066                         curr_uA = curr_uA * 1000;
7067                         break;
7068                 case UFSHCD_AMP:
7069                         curr_uA = curr_uA * 1000 * 1000;
7070                         break;
7071                 case UFSHCD_MICRO_AMP:
7072                 default:
7073                         break;
7074                 }
7075                 if (sup_curr_uA >= curr_uA)
7076                         break;
7077         }
7078         if (i < 0) {
7079                 i = 0;
7080                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7081         }
7082
7083         return (u32)i;
7084 }
7085
7086 /**
7087  * ufshcd_calc_icc_level - calculate the max ICC level
7088  * In case regulators are not initialized we'll return 0
7089  * @hba: per-adapter instance
7090  * @desc_buf: power descriptor buffer to extract ICC levels from.
7091  * @len: length of desc_buff
7092  *
7093  * Returns calculated ICC level
7094  */
7095 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7096                                                         u8 *desc_buf, int len)
7097 {
7098         u32 icc_level = 0;
7099
7100         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7101                                                 !hba->vreg_info.vccq2) {
7102                 dev_err(hba->dev,
7103                         "%s: Regulator capability was not set, actvIccLevel=%d",
7104                                                         __func__, icc_level);
7105                 goto out;
7106         }
7107
7108         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
7109                 icc_level = ufshcd_get_max_icc_level(
7110                                 hba->vreg_info.vcc->max_uA,
7111                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7112                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7113
7114         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
7115                 icc_level = ufshcd_get_max_icc_level(
7116                                 hba->vreg_info.vccq->max_uA,
7117                                 icc_level,
7118                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7119
7120         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7121                 icc_level = ufshcd_get_max_icc_level(
7122                                 hba->vreg_info.vccq2->max_uA,
7123                                 icc_level,
7124                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7125 out:
7126         return icc_level;
7127 }
7128
7129 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7130 {
7131         int ret;
7132         int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7133         u8 *desc_buf;
7134         u32 icc_level;
7135
7136         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7137         if (!desc_buf)
7138                 return;
7139
7140         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7141                                      desc_buf, buff_len);
7142         if (ret) {
7143                 dev_err(hba->dev,
7144                         "%s: Failed reading power descriptor.len = %d ret = %d",
7145                         __func__, buff_len, ret);
7146                 goto out;
7147         }
7148
7149         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7150                                                          buff_len);
7151         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7152
7153         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7154                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7155
7156         if (ret)
7157                 dev_err(hba->dev,
7158                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7159                         __func__, icc_level, ret);
7160
7161 out:
7162         kfree(desc_buf);
7163 }
7164
7165 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7166 {
7167         scsi_autopm_get_device(sdev);
7168         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7169         if (sdev->rpm_autosuspend)
7170                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7171                                                  RPM_AUTOSUSPEND_DELAY_MS);
7172         scsi_autopm_put_device(sdev);
7173 }
7174
7175 /**
7176  * ufshcd_scsi_add_wlus - Adds required W-LUs
7177  * @hba: per-adapter instance
7178  *
7179  * UFS device specification requires the UFS devices to support 4 well known
7180  * logical units:
7181  *      "REPORT_LUNS" (address: 01h)
7182  *      "UFS Device" (address: 50h)
7183  *      "RPMB" (address: 44h)
7184  *      "BOOT" (address: 30h)
7185  * UFS device's power management needs to be controlled by "POWER CONDITION"
7186  * field of SSU (START STOP UNIT) command. But this "power condition" field
7187  * will take effect only when its sent to "UFS device" well known logical unit
7188  * hence we require the scsi_device instance to represent this logical unit in
7189  * order for the UFS host driver to send the SSU command for power management.
7190  *
7191  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7192  * Block) LU so user space process can control this LU. User space may also
7193  * want to have access to BOOT LU.
7194  *
7195  * This function adds scsi device instances for each of all well known LUs
7196  * (except "REPORT LUNS" LU).
7197  *
7198  * Returns zero on success (all required W-LUs are added successfully),
7199  * non-zero error value on failure (if failed to add any of the required W-LU).
7200  */
7201 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7202 {
7203         int ret = 0;
7204         struct scsi_device *sdev_boot;
7205
7206         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7207                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7208         if (IS_ERR(hba->sdev_ufs_device)) {
7209                 ret = PTR_ERR(hba->sdev_ufs_device);
7210                 hba->sdev_ufs_device = NULL;
7211                 goto out;
7212         }
7213         ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
7214         scsi_device_put(hba->sdev_ufs_device);
7215
7216         hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7217                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7218         if (IS_ERR(hba->sdev_rpmb)) {
7219                 ret = PTR_ERR(hba->sdev_rpmb);
7220                 goto remove_sdev_ufs_device;
7221         }
7222         ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7223         scsi_device_put(hba->sdev_rpmb);
7224
7225         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7226                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7227         if (IS_ERR(sdev_boot)) {
7228                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7229         } else {
7230                 ufshcd_blk_pm_runtime_init(sdev_boot);
7231                 scsi_device_put(sdev_boot);
7232         }
7233         goto out;
7234
7235 remove_sdev_ufs_device:
7236         scsi_remove_device(hba->sdev_ufs_device);
7237 out:
7238         return ret;
7239 }
7240
7241 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7242 {
7243         struct ufs_dev_info *dev_info = &hba->dev_info;
7244         u8 lun;
7245         u32 d_lu_wb_buf_alloc;
7246
7247         if (!ufshcd_is_wb_allowed(hba))
7248                 return;
7249         /*
7250          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7251          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7252          * enabled
7253          */
7254         if (!(dev_info->wspecversion >= 0x310 ||
7255               dev_info->wspecversion == 0x220 ||
7256              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7257                 goto wb_disabled;
7258
7259         if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7260             DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7261                 goto wb_disabled;
7262
7263         dev_info->d_ext_ufs_feature_sup =
7264                 get_unaligned_be32(desc_buf +
7265                                    DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7266
7267         if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
7268                 goto wb_disabled;
7269
7270         /*
7271          * WB may be supported but not configured while provisioning.
7272          * The spec says, in dedicated wb buffer mode,
7273          * a max of 1 lun would have wb buffer configured.
7274          * Now only shared buffer mode is supported.
7275          */
7276         dev_info->b_wb_buffer_type =
7277                 desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7278
7279         dev_info->b_presrv_uspc_en =
7280                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7281
7282         if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
7283                 dev_info->d_wb_alloc_units =
7284                 get_unaligned_be32(desc_buf +
7285                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
7286                 if (!dev_info->d_wb_alloc_units)
7287                         goto wb_disabled;
7288         } else {
7289                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7290                         d_lu_wb_buf_alloc = 0;
7291                         ufshcd_read_unit_desc_param(hba,
7292                                         lun,
7293                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7294                                         (u8 *)&d_lu_wb_buf_alloc,
7295                                         sizeof(d_lu_wb_buf_alloc));
7296                         if (d_lu_wb_buf_alloc) {
7297                                 dev_info->wb_dedicated_lu = lun;
7298                                 break;
7299                         }
7300                 }
7301
7302                 if (!d_lu_wb_buf_alloc)
7303                         goto wb_disabled;
7304         }
7305         return;
7306
7307 wb_disabled:
7308         hba->caps &= ~UFSHCD_CAP_WB_EN;
7309 }
7310
7311 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7312 {
7313         struct ufs_dev_fix *f;
7314         struct ufs_dev_info *dev_info = &hba->dev_info;
7315
7316         if (!fixups)
7317                 return;
7318
7319         for (f = fixups; f->quirk; f++) {
7320                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7321                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
7322                      ((dev_info->model &&
7323                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7324                       !strcmp(f->model, UFS_ANY_MODEL)))
7325                         hba->dev_quirks |= f->quirk;
7326         }
7327 }
7328 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7329
7330 static void ufs_fixup_device_setup(struct ufs_hba *hba)
7331 {
7332         /* fix by general quirk table */
7333         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7334
7335         /* allow vendors to fix quirks */
7336         ufshcd_vops_fixup_dev_quirks(hba);
7337 }
7338
7339 static int ufs_get_device_desc(struct ufs_hba *hba)
7340 {
7341         int err;
7342         u8 model_index;
7343         u8 *desc_buf;
7344         struct ufs_dev_info *dev_info = &hba->dev_info;
7345
7346         desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7347         if (!desc_buf) {
7348                 err = -ENOMEM;
7349                 goto out;
7350         }
7351
7352         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7353                                      hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7354         if (err) {
7355                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7356                         __func__, err);
7357                 goto out;
7358         }
7359
7360         /*
7361          * getting vendor (manufacturerID) and Bank Index in big endian
7362          * format
7363          */
7364         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7365                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7366
7367         /* getting Specification Version in big endian format */
7368         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7369                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7370
7371         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7372
7373         err = ufshcd_read_string_desc(hba, model_index,
7374                                       &dev_info->model, SD_ASCII_STD);
7375         if (err < 0) {
7376                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7377                         __func__, err);
7378                 goto out;
7379         }
7380
7381         ufs_fixup_device_setup(hba);
7382
7383         ufshcd_wb_probe(hba, desc_buf);
7384
7385         /*
7386          * ufshcd_read_string_desc returns size of the string
7387          * reset the error value
7388          */
7389         err = 0;
7390
7391 out:
7392         kfree(desc_buf);
7393         return err;
7394 }
7395
7396 static void ufs_put_device_desc(struct ufs_hba *hba)
7397 {
7398         struct ufs_dev_info *dev_info = &hba->dev_info;
7399
7400         kfree(dev_info->model);
7401         dev_info->model = NULL;
7402 }
7403
7404 /**
7405  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7406  * @hba: per-adapter instance
7407  *
7408  * PA_TActivate parameter can be tuned manually if UniPro version is less than
7409  * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
7410  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7411  * the hibern8 exit latency.
7412  *
7413  * Returns zero on success, non-zero error value on failure.
7414  */
7415 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7416 {
7417         int ret = 0;
7418         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7419
7420         ret = ufshcd_dme_peer_get(hba,
7421                                   UIC_ARG_MIB_SEL(
7422                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7423                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7424                                   &peer_rx_min_activatetime);
7425         if (ret)
7426                 goto out;
7427
7428         /* make sure proper unit conversion is applied */
7429         tuned_pa_tactivate =
7430                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7431                  / PA_TACTIVATE_TIME_UNIT_US);
7432         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7433                              tuned_pa_tactivate);
7434
7435 out:
7436         return ret;
7437 }
7438
7439 /**
7440  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7441  * @hba: per-adapter instance
7442  *
7443  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7444  * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
7445  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7446  * This optimal value can help reduce the hibern8 exit latency.
7447  *
7448  * Returns zero on success, non-zero error value on failure.
7449  */
7450 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7451 {
7452         int ret = 0;
7453         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7454         u32 max_hibern8_time, tuned_pa_hibern8time;
7455
7456         ret = ufshcd_dme_get(hba,
7457                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7458                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7459                                   &local_tx_hibern8_time_cap);
7460         if (ret)
7461                 goto out;
7462
7463         ret = ufshcd_dme_peer_get(hba,
7464                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7465                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7466                                   &peer_rx_hibern8_time_cap);
7467         if (ret)
7468                 goto out;
7469
7470         max_hibern8_time = max(local_tx_hibern8_time_cap,
7471                                peer_rx_hibern8_time_cap);
7472         /* make sure proper unit conversion is applied */
7473         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7474                                 / PA_HIBERN8_TIME_UNIT_US);
7475         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7476                              tuned_pa_hibern8time);
7477 out:
7478         return ret;
7479 }
7480
7481 /**
7482  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7483  * less than device PA_TACTIVATE time.
7484  * @hba: per-adapter instance
7485  *
7486  * Some UFS devices require host PA_TACTIVATE to be lower than device
7487  * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7488  * for such devices.
7489  *
7490  * Returns zero on success, non-zero error value on failure.
7491  */
7492 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7493 {
7494         int ret = 0;
7495         u32 granularity, peer_granularity;
7496         u32 pa_tactivate, peer_pa_tactivate;
7497         u32 pa_tactivate_us, peer_pa_tactivate_us;
7498         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7499
7500         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7501                                   &granularity);
7502         if (ret)
7503                 goto out;
7504
7505         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7506                                   &peer_granularity);
7507         if (ret)
7508                 goto out;
7509
7510         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7511             (granularity > PA_GRANULARITY_MAX_VAL)) {
7512                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7513                         __func__, granularity);
7514                 return -EINVAL;
7515         }
7516
7517         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7518             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7519                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7520                         __func__, peer_granularity);
7521                 return -EINVAL;
7522         }
7523
7524         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7525         if (ret)
7526                 goto out;
7527
7528         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7529                                   &peer_pa_tactivate);
7530         if (ret)
7531                 goto out;
7532
7533         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7534         peer_pa_tactivate_us = peer_pa_tactivate *
7535                              gran_to_us_table[peer_granularity - 1];
7536
7537         if (pa_tactivate_us > peer_pa_tactivate_us) {
7538                 u32 new_peer_pa_tactivate;
7539
7540                 new_peer_pa_tactivate = pa_tactivate_us /
7541                                       gran_to_us_table[peer_granularity - 1];
7542                 new_peer_pa_tactivate++;
7543                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7544                                           new_peer_pa_tactivate);
7545         }
7546
7547 out:
7548         return ret;
7549 }
7550
7551 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7552 {
7553         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7554                 ufshcd_tune_pa_tactivate(hba);
7555                 ufshcd_tune_pa_hibern8time(hba);
7556         }
7557
7558         ufshcd_vops_apply_dev_quirks(hba);
7559
7560         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7561                 /* set 1ms timeout for PA_TACTIVATE */
7562                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7563
7564         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7565                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7566 }
7567
7568 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7569 {
7570         hba->ufs_stats.hibern8_exit_cnt = 0;
7571         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7572         hba->req_abort_count = 0;
7573 }
7574
7575 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7576 {
7577         int err;
7578         size_t buff_len;
7579         u8 *desc_buf;
7580
7581         buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7582         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7583         if (!desc_buf) {
7584                 err = -ENOMEM;
7585                 goto out;
7586         }
7587
7588         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7589                                      desc_buf, buff_len);
7590         if (err) {
7591                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7592                                 __func__, err);
7593                 goto out;
7594         }
7595
7596         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7597                 hba->dev_info.max_lu_supported = 32;
7598         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7599                 hba->dev_info.max_lu_supported = 8;
7600
7601 out:
7602         kfree(desc_buf);
7603         return err;
7604 }
7605
7606 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7607         {19200000, REF_CLK_FREQ_19_2_MHZ},
7608         {26000000, REF_CLK_FREQ_26_MHZ},
7609         {38400000, REF_CLK_FREQ_38_4_MHZ},
7610         {52000000, REF_CLK_FREQ_52_MHZ},
7611         {0, REF_CLK_FREQ_INVAL},
7612 };
7613
7614 static enum ufs_ref_clk_freq
7615 ufs_get_bref_clk_from_hz(unsigned long freq)
7616 {
7617         int i;
7618
7619         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7620                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7621                         return ufs_ref_clk_freqs[i].val;
7622
7623         return REF_CLK_FREQ_INVAL;
7624 }
7625
7626 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7627 {
7628         unsigned long freq;
7629
7630         freq = clk_get_rate(refclk);
7631
7632         hba->dev_ref_clk_freq =
7633                 ufs_get_bref_clk_from_hz(freq);
7634
7635         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7636                 dev_err(hba->dev,
7637                 "invalid ref_clk setting = %ld\n", freq);
7638 }
7639
7640 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7641 {
7642         int err;
7643         u32 ref_clk;
7644         u32 freq = hba->dev_ref_clk_freq;
7645
7646         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7647                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7648
7649         if (err) {
7650                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7651                         err);
7652                 goto out;
7653         }
7654
7655         if (ref_clk == freq)
7656                 goto out; /* nothing to update */
7657
7658         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7659                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7660
7661         if (err) {
7662                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7663                         ufs_ref_clk_freqs[freq].freq_hz);
7664                 goto out;
7665         }
7666
7667         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7668                         ufs_ref_clk_freqs[freq].freq_hz);
7669
7670 out:
7671         return err;
7672 }
7673
7674 static int ufshcd_device_params_init(struct ufs_hba *hba)
7675 {
7676         bool flag;
7677         int ret, i;
7678
7679          /* Init device descriptor sizes */
7680         for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7681                 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7682
7683         /* Init UFS geometry descriptor related parameters */
7684         ret = ufshcd_device_geo_params_init(hba);
7685         if (ret)
7686                 goto out;
7687
7688         /* Check and apply UFS device quirks */
7689         ret = ufs_get_device_desc(hba);
7690         if (ret) {
7691                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7692                         __func__, ret);
7693                 goto out;
7694         }
7695
7696         ufshcd_get_ref_clk_gating_wait(hba);
7697
7698         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7699                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7700                 hba->dev_info.f_power_on_wp_en = flag;
7701
7702         /* Probe maximum power mode co-supported by both UFS host and device */
7703         if (ufshcd_get_max_pwr_mode(hba))
7704                 dev_err(hba->dev,
7705                         "%s: Failed getting max supported power mode\n",
7706                         __func__);
7707 out:
7708         return ret;
7709 }
7710
7711 /**
7712  * ufshcd_add_lus - probe and add UFS logical units
7713  * @hba: per-adapter instance
7714  */
7715 static int ufshcd_add_lus(struct ufs_hba *hba)
7716 {
7717         int ret;
7718
7719         /* Add required well known logical units to scsi mid layer */
7720         ret = ufshcd_scsi_add_wlus(hba);
7721         if (ret)
7722                 goto out;
7723
7724         /* Initialize devfreq after UFS device is detected */
7725         if (ufshcd_is_clkscaling_supported(hba)) {
7726                 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7727                         &hba->pwr_info,
7728                         sizeof(struct ufs_pa_layer_attr));
7729                 hba->clk_scaling.saved_pwr_info.is_valid = true;
7730                 if (!hba->devfreq) {
7731                         ret = ufshcd_devfreq_init(hba);
7732                         if (ret)
7733                                 goto out;
7734                 }
7735
7736                 hba->clk_scaling.is_allowed = true;
7737         }
7738
7739         ufs_bsg_probe(hba);
7740         scsi_scan_host(hba->host);
7741         pm_runtime_put_sync(hba->dev);
7742
7743 out:
7744         return ret;
7745 }
7746
7747 static int
7748 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
7749
7750 static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
7751 {
7752         struct scsi_device *sdp;
7753         unsigned long flags;
7754         int ret = 0;
7755
7756         spin_lock_irqsave(hba->host->host_lock, flags);
7757         if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
7758                 sdp = hba->sdev_ufs_device;
7759         else if (wlun == UFS_UPIU_RPMB_WLUN)
7760                 sdp = hba->sdev_rpmb;
7761         else
7762                 BUG();
7763         if (sdp) {
7764                 ret = scsi_device_get(sdp);
7765                 if (!ret && !scsi_device_online(sdp)) {
7766                         ret = -ENODEV;
7767                         scsi_device_put(sdp);
7768                 }
7769         } else {
7770                 ret = -ENODEV;
7771         }
7772         spin_unlock_irqrestore(hba->host->host_lock, flags);
7773         if (ret)
7774                 goto out_err;
7775
7776         ret = ufshcd_send_request_sense(hba, sdp);
7777         scsi_device_put(sdp);
7778 out_err:
7779         if (ret)
7780                 dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
7781                                 __func__, wlun, ret);
7782         return ret;
7783 }
7784
7785 static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
7786 {
7787         int ret = 0;
7788
7789         if (!hba->wlun_dev_clr_ua)
7790                 goto out;
7791
7792         ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
7793         if (!ret)
7794                 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
7795         if (!ret)
7796                 hba->wlun_dev_clr_ua = false;
7797 out:
7798         if (ret)
7799                 dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
7800                                 __func__, ret);
7801         return ret;
7802 }
7803
7804 /**
7805  * ufshcd_probe_hba - probe hba to detect device and initialize
7806  * @hba: per-adapter instance
7807  * @async: asynchronous execution or not
7808  *
7809  * Execute link-startup and verify device initialization
7810  */
7811 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7812 {
7813         int ret;
7814         unsigned long flags;
7815         ktime_t start = ktime_get();
7816
7817         ret = ufshcd_link_startup(hba);
7818         if (ret)
7819                 goto out;
7820
7821         /* Debug counters initialization */
7822         ufshcd_clear_dbg_ufs_stats(hba);
7823
7824         /* UniPro link is active now */
7825         ufshcd_set_link_active(hba);
7826
7827         /* Verify device initialization by sending NOP OUT UPIU */
7828         ret = ufshcd_verify_dev_init(hba);
7829         if (ret)
7830                 goto out;
7831
7832         /* Initiate UFS initialization, and waiting until completion */
7833         ret = ufshcd_complete_dev_init(hba);
7834         if (ret)
7835                 goto out;
7836
7837         /*
7838          * Initialize UFS device parameters used by driver, these
7839          * parameters are associated with UFS descriptors.
7840          */
7841         if (async) {
7842                 ret = ufshcd_device_params_init(hba);
7843                 if (ret)
7844                         goto out;
7845         }
7846
7847         ufshcd_tune_unipro_params(hba);
7848
7849         /* UFS device is also active now */
7850         ufshcd_set_ufs_dev_active(hba);
7851         ufshcd_force_reset_auto_bkops(hba);
7852         hba->wlun_dev_clr_ua = true;
7853
7854         /* Gear up to HS gear if supported */
7855         if (hba->max_pwr_info.is_valid) {
7856                 /*
7857                  * Set the right value to bRefClkFreq before attempting to
7858                  * switch to HS gears.
7859                  */
7860                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7861                         ufshcd_set_dev_ref_clk(hba);
7862                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7863                 if (ret) {
7864                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7865                                         __func__, ret);
7866                         goto out;
7867                 }
7868                 ufshcd_print_pwr_info(hba);
7869         }
7870
7871         /*
7872          * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7873          * and for removable UFS card as well, hence always set the parameter.
7874          * Note: Error handler may issue the device reset hence resetting
7875          * bActiveICCLevel as well so it is always safe to set this here.
7876          */
7877         ufshcd_set_active_icc_lvl(hba);
7878
7879         ufshcd_wb_config(hba);
7880         /* Enable Auto-Hibernate if configured */
7881         ufshcd_auto_hibern8_enable(hba);
7882
7883 out:
7884         spin_lock_irqsave(hba->host->host_lock, flags);
7885         if (ret)
7886                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7887         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7888                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7889         spin_unlock_irqrestore(hba->host->host_lock, flags);
7890
7891         trace_ufshcd_init(dev_name(hba->dev), ret,
7892                 ktime_to_us(ktime_sub(ktime_get(), start)),
7893                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7894         return ret;
7895 }
7896
7897 /**
7898  * ufshcd_async_scan - asynchronous execution for probing hba
7899  * @data: data pointer to pass to this function
7900  * @cookie: cookie data
7901  */
7902 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7903 {
7904         struct ufs_hba *hba = (struct ufs_hba *)data;
7905         int ret;
7906
7907         down(&hba->eh_sem);
7908         /* Initialize hba, detect and initialize UFS device */
7909         ret = ufshcd_probe_hba(hba, true);
7910         up(&hba->eh_sem);
7911         if (ret)
7912                 goto out;
7913
7914         /* Probe and add UFS logical units  */
7915         ret = ufshcd_add_lus(hba);
7916 out:
7917         /*
7918          * If we failed to initialize the device or the device is not
7919          * present, turn off the power/clocks etc.
7920          */
7921         if (ret) {
7922                 pm_runtime_put_sync(hba->dev);
7923                 ufshcd_exit_clk_scaling(hba);
7924                 ufshcd_hba_exit(hba);
7925         } else {
7926                 ufshcd_clear_ua_wluns(hba);
7927         }
7928 }
7929
7930 static const struct attribute_group *ufshcd_driver_groups[] = {
7931         &ufs_sysfs_unit_descriptor_group,
7932         &ufs_sysfs_lun_attributes_group,
7933         NULL,
7934 };
7935
7936 static struct ufs_hba_variant_params ufs_hba_vps = {
7937         .hba_enable_delay_us            = 1000,
7938         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
7939         .devfreq_profile.polling_ms     = 100,
7940         .devfreq_profile.target         = ufshcd_devfreq_target,
7941         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
7942         .ondemand_data.upthreshold      = 70,
7943         .ondemand_data.downdifferential = 5,
7944 };
7945
7946 static struct scsi_host_template ufshcd_driver_template = {
7947         .module                 = THIS_MODULE,
7948         .name                   = UFSHCD,
7949         .proc_name              = UFSHCD,
7950         .queuecommand           = ufshcd_queuecommand,
7951         .slave_alloc            = ufshcd_slave_alloc,
7952         .slave_configure        = ufshcd_slave_configure,
7953         .slave_destroy          = ufshcd_slave_destroy,
7954         .change_queue_depth     = ufshcd_change_queue_depth,
7955         .eh_abort_handler       = ufshcd_abort,
7956         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7957         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7958         .this_id                = -1,
7959         .sg_tablesize           = SG_ALL,
7960         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7961         .can_queue              = UFSHCD_CAN_QUEUE,
7962         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
7963         .max_host_blocked       = 1,
7964         .track_queue_depth      = 1,
7965         .sdev_groups            = ufshcd_driver_groups,
7966         .dma_boundary           = PAGE_SIZE - 1,
7967         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
7968 };
7969
7970 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7971                                    int ua)
7972 {
7973         int ret;
7974
7975         if (!vreg)
7976                 return 0;
7977
7978         /*
7979          * "set_load" operation shall be required on those regulators
7980          * which specifically configured current limitation. Otherwise
7981          * zero max_uA may cause unexpected behavior when regulator is
7982          * enabled or set as high power mode.
7983          */
7984         if (!vreg->max_uA)
7985                 return 0;
7986
7987         ret = regulator_set_load(vreg->reg, ua);
7988         if (ret < 0) {
7989                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7990                                 __func__, vreg->name, ua, ret);
7991         }
7992
7993         return ret;
7994 }
7995
7996 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7997                                          struct ufs_vreg *vreg)
7998 {
7999         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8000 }
8001
8002 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8003                                          struct ufs_vreg *vreg)
8004 {
8005         if (!vreg)
8006                 return 0;
8007
8008         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8009 }
8010
8011 static int ufshcd_config_vreg(struct device *dev,
8012                 struct ufs_vreg *vreg, bool on)
8013 {
8014         int ret = 0;
8015         struct regulator *reg;
8016         const char *name;
8017         int min_uV, uA_load;
8018
8019         BUG_ON(!vreg);
8020
8021         reg = vreg->reg;
8022         name = vreg->name;
8023
8024         if (regulator_count_voltages(reg) > 0) {
8025                 uA_load = on ? vreg->max_uA : 0;
8026                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8027                 if (ret)
8028                         goto out;
8029
8030                 if (vreg->min_uV && vreg->max_uV) {
8031                         min_uV = on ? vreg->min_uV : 0;
8032                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8033                         if (ret)
8034                                 dev_err(dev,
8035                                         "%s: %s set voltage failed, err=%d\n",
8036                                         __func__, name, ret);
8037                 }
8038         }
8039 out:
8040         return ret;
8041 }
8042
8043 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8044 {
8045         int ret = 0;
8046
8047         if (!vreg || vreg->enabled)
8048                 goto out;
8049
8050         ret = ufshcd_config_vreg(dev, vreg, true);
8051         if (!ret)
8052                 ret = regulator_enable(vreg->reg);
8053
8054         if (!ret)
8055                 vreg->enabled = true;
8056         else
8057                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8058                                 __func__, vreg->name, ret);
8059 out:
8060         return ret;
8061 }
8062
8063 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8064 {
8065         int ret = 0;
8066
8067         if (!vreg || !vreg->enabled || vreg->always_on)
8068                 goto out;
8069
8070         ret = regulator_disable(vreg->reg);
8071
8072         if (!ret) {
8073                 /* ignore errors on applying disable config */
8074                 ufshcd_config_vreg(dev, vreg, false);
8075                 vreg->enabled = false;
8076         } else {
8077                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8078                                 __func__, vreg->name, ret);
8079         }
8080 out:
8081         return ret;
8082 }
8083
8084 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8085 {
8086         int ret = 0;
8087         struct device *dev = hba->dev;
8088         struct ufs_vreg_info *info = &hba->vreg_info;
8089
8090         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8091         if (ret)
8092                 goto out;
8093
8094         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8095         if (ret)
8096                 goto out;
8097
8098         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8099
8100 out:
8101         if (ret) {
8102                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8103                 ufshcd_toggle_vreg(dev, info->vccq, false);
8104                 ufshcd_toggle_vreg(dev, info->vcc, false);
8105         }
8106         return ret;
8107 }
8108
8109 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8110 {
8111         struct ufs_vreg_info *info = &hba->vreg_info;
8112
8113         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8114 }
8115
8116 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8117 {
8118         int ret = 0;
8119
8120         if (!vreg)
8121                 goto out;
8122
8123         vreg->reg = devm_regulator_get(dev, vreg->name);
8124         if (IS_ERR(vreg->reg)) {
8125                 ret = PTR_ERR(vreg->reg);
8126                 dev_err(dev, "%s: %s get failed, err=%d\n",
8127                                 __func__, vreg->name, ret);
8128         }
8129 out:
8130         return ret;
8131 }
8132
8133 static int ufshcd_init_vreg(struct ufs_hba *hba)
8134 {
8135         int ret = 0;
8136         struct device *dev = hba->dev;
8137         struct ufs_vreg_info *info = &hba->vreg_info;
8138
8139         ret = ufshcd_get_vreg(dev, info->vcc);
8140         if (ret)
8141                 goto out;
8142
8143         ret = ufshcd_get_vreg(dev, info->vccq);
8144         if (!ret)
8145                 ret = ufshcd_get_vreg(dev, info->vccq2);
8146 out:
8147         return ret;
8148 }
8149
8150 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8151 {
8152         struct ufs_vreg_info *info = &hba->vreg_info;
8153
8154         if (info)
8155                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8156
8157         return 0;
8158 }
8159
8160 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8161 {
8162         int ret = 0;
8163         struct ufs_clk_info *clki;
8164         struct list_head *head = &hba->clk_list_head;
8165         unsigned long flags;
8166         ktime_t start = ktime_get();
8167         bool clk_state_changed = false;
8168
8169         if (list_empty(head))
8170                 goto out;
8171
8172         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8173         if (ret)
8174                 return ret;
8175
8176         list_for_each_entry(clki, head, list) {
8177                 if (!IS_ERR_OR_NULL(clki->clk)) {
8178                         /*
8179                          * Don't disable clocks which are needed
8180                          * to keep the link active.
8181                          */
8182                         if (ufshcd_is_link_active(hba) &&
8183                             clki->keep_link_active)
8184                                 continue;
8185
8186                         clk_state_changed = on ^ clki->enabled;
8187                         if (on && !clki->enabled) {
8188                                 ret = clk_prepare_enable(clki->clk);
8189                                 if (ret) {
8190                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8191                                                 __func__, clki->name, ret);
8192                                         goto out;
8193                                 }
8194                         } else if (!on && clki->enabled) {
8195                                 clk_disable_unprepare(clki->clk);
8196                         }
8197                         clki->enabled = on;
8198                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8199                                         clki->name, on ? "en" : "dis");
8200                 }
8201         }
8202
8203         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8204         if (ret)
8205                 return ret;
8206
8207 out:
8208         if (ret) {
8209                 list_for_each_entry(clki, head, list) {
8210                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8211                                 clk_disable_unprepare(clki->clk);
8212                 }
8213         } else if (!ret && on) {
8214                 spin_lock_irqsave(hba->host->host_lock, flags);
8215                 hba->clk_gating.state = CLKS_ON;
8216                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8217                                         hba->clk_gating.state);
8218                 spin_unlock_irqrestore(hba->host->host_lock, flags);
8219         }
8220
8221         if (clk_state_changed)
8222                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8223                         (on ? "on" : "off"),
8224                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8225         return ret;
8226 }
8227
8228 static int ufshcd_init_clocks(struct ufs_hba *hba)
8229 {
8230         int ret = 0;
8231         struct ufs_clk_info *clki;
8232         struct device *dev = hba->dev;
8233         struct list_head *head = &hba->clk_list_head;
8234
8235         if (list_empty(head))
8236                 goto out;
8237
8238         list_for_each_entry(clki, head, list) {
8239                 if (!clki->name)
8240                         continue;
8241
8242                 clki->clk = devm_clk_get(dev, clki->name);
8243                 if (IS_ERR(clki->clk)) {
8244                         ret = PTR_ERR(clki->clk);
8245                         dev_err(dev, "%s: %s clk get failed, %d\n",
8246                                         __func__, clki->name, ret);
8247                         goto out;
8248                 }
8249
8250                 /*
8251                  * Parse device ref clk freq as per device tree "ref_clk".
8252                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8253                  * in ufshcd_alloc_host().
8254                  */
8255                 if (!strcmp(clki->name, "ref_clk"))
8256                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8257
8258                 if (clki->max_freq) {
8259                         ret = clk_set_rate(clki->clk, clki->max_freq);
8260                         if (ret) {
8261                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8262                                         __func__, clki->name,
8263                                         clki->max_freq, ret);
8264                                 goto out;
8265                         }
8266                         clki->curr_freq = clki->max_freq;
8267                 }
8268                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8269                                 clki->name, clk_get_rate(clki->clk));
8270         }
8271 out:
8272         return ret;
8273 }
8274
8275 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8276 {
8277         int err = 0;
8278
8279         if (!hba->vops)
8280                 goto out;
8281
8282         err = ufshcd_vops_init(hba);
8283         if (err)
8284                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8285                         __func__, ufshcd_get_var_name(hba), err);
8286 out:
8287         return err;
8288 }
8289
8290 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8291 {
8292         if (!hba->vops)
8293                 return;
8294
8295         ufshcd_vops_exit(hba);
8296 }
8297
8298 static int ufshcd_hba_init(struct ufs_hba *hba)
8299 {
8300         int err;
8301
8302         /*
8303          * Handle host controller power separately from the UFS device power
8304          * rails as it will help controlling the UFS host controller power
8305          * collapse easily which is different than UFS device power collapse.
8306          * Also, enable the host controller power before we go ahead with rest
8307          * of the initialization here.
8308          */
8309         err = ufshcd_init_hba_vreg(hba);
8310         if (err)
8311                 goto out;
8312
8313         err = ufshcd_setup_hba_vreg(hba, true);
8314         if (err)
8315                 goto out;
8316
8317         err = ufshcd_init_clocks(hba);
8318         if (err)
8319                 goto out_disable_hba_vreg;
8320
8321         err = ufshcd_setup_clocks(hba, true);
8322         if (err)
8323                 goto out_disable_hba_vreg;
8324
8325         err = ufshcd_init_vreg(hba);
8326         if (err)
8327                 goto out_disable_clks;
8328
8329         err = ufshcd_setup_vreg(hba, true);
8330         if (err)
8331                 goto out_disable_clks;
8332
8333         err = ufshcd_variant_hba_init(hba);
8334         if (err)
8335                 goto out_disable_vreg;
8336
8337         hba->is_powered = true;
8338         goto out;
8339
8340 out_disable_vreg:
8341         ufshcd_setup_vreg(hba, false);
8342 out_disable_clks:
8343         ufshcd_setup_clocks(hba, false);
8344 out_disable_hba_vreg:
8345         ufshcd_setup_hba_vreg(hba, false);
8346 out:
8347         return err;
8348 }
8349
8350 static void ufshcd_hba_exit(struct ufs_hba *hba)
8351 {
8352         if (hba->is_powered) {
8353                 ufshcd_variant_hba_exit(hba);
8354                 ufshcd_setup_vreg(hba, false);
8355                 ufshcd_suspend_clkscaling(hba);
8356                 if (ufshcd_is_clkscaling_supported(hba))
8357                         if (hba->devfreq)
8358                                 ufshcd_suspend_clkscaling(hba);
8359                 ufshcd_setup_clocks(hba, false);
8360                 ufshcd_setup_hba_vreg(hba, false);
8361                 hba->is_powered = false;
8362                 ufs_put_device_desc(hba);
8363         }
8364 }
8365
8366 static int
8367 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8368 {
8369         unsigned char cmd[6] = {REQUEST_SENSE,
8370                                 0,
8371                                 0,
8372                                 0,
8373                                 UFS_SENSE_SIZE,
8374                                 0};
8375         char *buffer;
8376         int ret;
8377
8378         buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
8379         if (!buffer) {
8380                 ret = -ENOMEM;
8381                 goto out;
8382         }
8383
8384         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
8385                         UFS_SENSE_SIZE, NULL, NULL,
8386                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
8387         if (ret)
8388                 pr_err("%s: failed with err %d\n", __func__, ret);
8389
8390         kfree(buffer);
8391 out:
8392         return ret;
8393 }
8394
8395 /**
8396  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8397  *                           power mode
8398  * @hba: per adapter instance
8399  * @pwr_mode: device power mode to set
8400  *
8401  * Returns 0 if requested power mode is set successfully
8402  * Returns non-zero if failed to set the requested power mode
8403  */
8404 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8405                                      enum ufs_dev_pwr_mode pwr_mode)
8406 {
8407         unsigned char cmd[6] = { START_STOP };
8408         struct scsi_sense_hdr sshdr;
8409         struct scsi_device *sdp;
8410         unsigned long flags;
8411         int ret;
8412
8413         spin_lock_irqsave(hba->host->host_lock, flags);
8414         sdp = hba->sdev_ufs_device;
8415         if (sdp) {
8416                 ret = scsi_device_get(sdp);
8417                 if (!ret && !scsi_device_online(sdp)) {
8418                         ret = -ENODEV;
8419                         scsi_device_put(sdp);
8420                 }
8421         } else {
8422                 ret = -ENODEV;
8423         }
8424         spin_unlock_irqrestore(hba->host->host_lock, flags);
8425
8426         if (ret)
8427                 return ret;
8428
8429         /*
8430          * If scsi commands fail, the scsi mid-layer schedules scsi error-
8431          * handling, which would wait for host to be resumed. Since we know
8432          * we are functional while we are here, skip host resume in error
8433          * handling context.
8434          */
8435         hba->host->eh_noresume = 1;
8436         ufshcd_clear_ua_wluns(hba);
8437
8438         cmd[4] = pwr_mode << 4;
8439
8440         /*
8441          * Current function would be generally called from the power management
8442          * callbacks hence set the RQF_PM flag so that it doesn't resume the
8443          * already suspended childs.
8444          */
8445         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8446                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8447         if (ret) {
8448                 sdev_printk(KERN_WARNING, sdp,
8449                             "START_STOP failed for power mode: %d, result %x\n",
8450                             pwr_mode, ret);
8451                 if (driver_byte(ret) == DRIVER_SENSE)
8452                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
8453         }
8454
8455         if (!ret)
8456                 hba->curr_dev_pwr_mode = pwr_mode;
8457
8458         scsi_device_put(sdp);
8459         hba->host->eh_noresume = 0;
8460         return ret;
8461 }
8462
8463 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8464                                         enum uic_link_state req_link_state,
8465                                         int check_for_bkops)
8466 {
8467         int ret = 0;
8468
8469         if (req_link_state == hba->uic_link_state)
8470                 return 0;
8471
8472         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8473                 ret = ufshcd_uic_hibern8_enter(hba);
8474                 if (!ret) {
8475                         ufshcd_set_link_hibern8(hba);
8476                 } else {
8477                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8478                                         __func__, ret);
8479                         goto out;
8480                 }
8481         }
8482         /*
8483          * If autobkops is enabled, link can't be turned off because
8484          * turning off the link would also turn off the device, except in the
8485          * case of DeepSleep where the device is expected to remain powered.
8486          */
8487         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8488                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
8489                 /*
8490                  * Let's make sure that link is in low power mode, we are doing
8491                  * this currently by putting the link in Hibern8. Otherway to
8492                  * put the link in low power mode is to send the DME end point
8493                  * to device and then send the DME reset command to local
8494                  * unipro. But putting the link in hibern8 is much faster.
8495                  *
8496                  * Note also that putting the link in Hibern8 is a requirement
8497                  * for entering DeepSleep.
8498                  */
8499                 ret = ufshcd_uic_hibern8_enter(hba);
8500                 if (ret) {
8501                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8502                                         __func__, ret);
8503                         goto out;
8504                 }
8505                 /*
8506                  * Change controller state to "reset state" which
8507                  * should also put the link in off/reset state
8508                  */
8509                 ufshcd_hba_stop(hba);
8510                 /*
8511                  * TODO: Check if we need any delay to make sure that
8512                  * controller is reset
8513                  */
8514                 ufshcd_set_link_off(hba);
8515         }
8516
8517 out:
8518         return ret;
8519 }
8520
8521 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8522 {
8523         bool vcc_off = false;
8524
8525         /*
8526          * It seems some UFS devices may keep drawing more than sleep current
8527          * (atleast for 500us) from UFS rails (especially from VCCQ rail).
8528          * To avoid this situation, add 2ms delay before putting these UFS
8529          * rails in LPM mode.
8530          */
8531         if (!ufshcd_is_link_active(hba) &&
8532             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8533                 usleep_range(2000, 2100);
8534
8535         /*
8536          * If UFS device is either in UFS_Sleep turn off VCC rail to save some
8537          * power.
8538          *
8539          * If UFS device and link is in OFF state, all power supplies (VCC,
8540          * VCCQ, VCCQ2) can be turned off if power on write protect is not
8541          * required. If UFS link is inactive (Hibern8 or OFF state) and device
8542          * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8543          *
8544          * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8545          * in low power state which would save some power.
8546          *
8547          * If Write Booster is enabled and the device needs to flush the WB
8548          * buffer OR if bkops status is urgent for WB, keep Vcc on.
8549          */
8550         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8551             !hba->dev_info.is_lu_power_on_wp) {
8552                 ufshcd_setup_vreg(hba, false);
8553                 vcc_off = true;
8554         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8555                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8556                 vcc_off = true;
8557                 if (!ufshcd_is_link_active(hba)) {
8558                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8559                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8560                 }
8561         }
8562
8563         /*
8564          * Some UFS devices require delay after VCC power rail is turned-off.
8565          */
8566         if (vcc_off && hba->vreg_info.vcc &&
8567                 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8568                 usleep_range(5000, 5100);
8569 }
8570
8571 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8572 {
8573         int ret = 0;
8574
8575         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8576             !hba->dev_info.is_lu_power_on_wp) {
8577                 ret = ufshcd_setup_vreg(hba, true);
8578         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8579                 if (!ret && !ufshcd_is_link_active(hba)) {
8580                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8581                         if (ret)
8582                                 goto vcc_disable;
8583                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8584                         if (ret)
8585                                 goto vccq_lpm;
8586                 }
8587                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8588         }
8589         goto out;
8590
8591 vccq_lpm:
8592         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8593 vcc_disable:
8594         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8595 out:
8596         return ret;
8597 }
8598
8599 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8600 {
8601         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8602                 ufshcd_setup_hba_vreg(hba, false);
8603 }
8604
8605 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8606 {
8607         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8608                 ufshcd_setup_hba_vreg(hba, true);
8609 }
8610
8611 /**
8612  * ufshcd_suspend - helper function for suspend operations
8613  * @hba: per adapter instance
8614  * @pm_op: desired low power operation type
8615  *
8616  * This function will try to put the UFS device and link into low power
8617  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8618  * (System PM level).
8619  *
8620  * If this function is called during shutdown, it will make sure that
8621  * both UFS device and UFS link is powered off.
8622  *
8623  * NOTE: UFS device & link must be active before we enter in this function.
8624  *
8625  * Returns 0 for success and non-zero for failure
8626  */
8627 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8628 {
8629         int ret = 0;
8630         int check_for_bkops;
8631         enum ufs_pm_level pm_lvl;
8632         enum ufs_dev_pwr_mode req_dev_pwr_mode;
8633         enum uic_link_state req_link_state;
8634
8635         hba->pm_op_in_progress = 1;
8636         if (!ufshcd_is_shutdown_pm(pm_op)) {
8637                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8638                          hba->rpm_lvl : hba->spm_lvl;
8639                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8640                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8641         } else {
8642                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8643                 req_link_state = UIC_LINK_OFF_STATE;
8644         }
8645
8646         /*
8647          * If we can't transition into any of the low power modes
8648          * just gate the clocks.
8649          */
8650         ufshcd_hold(hba, false);
8651         hba->clk_gating.is_suspended = true;
8652
8653         if (hba->clk_scaling.is_allowed) {
8654                 cancel_work_sync(&hba->clk_scaling.suspend_work);
8655                 cancel_work_sync(&hba->clk_scaling.resume_work);
8656                 ufshcd_suspend_clkscaling(hba);
8657         }
8658
8659         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8660                         req_link_state == UIC_LINK_ACTIVE_STATE) {
8661                 goto disable_clks;
8662         }
8663
8664         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8665             (req_link_state == hba->uic_link_state))
8666                 goto enable_gating;
8667
8668         /* UFS device & link must be active before we enter in this function */
8669         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8670                 ret = -EINVAL;
8671                 goto enable_gating;
8672         }
8673
8674         if (ufshcd_is_runtime_pm(pm_op)) {
8675                 if (ufshcd_can_autobkops_during_suspend(hba)) {
8676                         /*
8677                          * The device is idle with no requests in the queue,
8678                          * allow background operations if bkops status shows
8679                          * that performance might be impacted.
8680                          */
8681                         ret = ufshcd_urgent_bkops(hba);
8682                         if (ret)
8683                                 goto enable_gating;
8684                 } else {
8685                         /* make sure that auto bkops is disabled */
8686                         ufshcd_disable_auto_bkops(hba);
8687                 }
8688                 /*
8689                  * If device needs to do BKOP or WB buffer flush during
8690                  * Hibern8, keep device power mode as "active power mode"
8691                  * and VCC supply.
8692                  */
8693                 hba->dev_info.b_rpm_dev_flush_capable =
8694                         hba->auto_bkops_enabled ||
8695                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8696                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8697                         ufshcd_is_auto_hibern8_enabled(hba))) &&
8698                         ufshcd_wb_need_flush(hba));
8699         }
8700
8701         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8702                 if (!ufshcd_is_runtime_pm(pm_op))
8703                         /* ensure that bkops is disabled */
8704                         ufshcd_disable_auto_bkops(hba);
8705
8706                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8707                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8708                         if (ret)
8709                                 goto enable_gating;
8710                 }
8711         }
8712
8713         flush_work(&hba->eeh_work);
8714
8715         /*
8716          * In the case of DeepSleep, the device is expected to remain powered
8717          * with the link off, so do not check for bkops.
8718          */
8719         check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
8720         ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
8721         if (ret)
8722                 goto set_dev_active;
8723
8724         ufshcd_vreg_set_lpm(hba);
8725
8726 disable_clks:
8727         /*
8728          * Call vendor specific suspend callback. As these callbacks may access
8729          * vendor specific host controller register space call them before the
8730          * host clocks are ON.
8731          */
8732         ret = ufshcd_vops_suspend(hba, pm_op);
8733         if (ret)
8734                 goto set_link_active;
8735         /*
8736          * Disable the host irq as host controller as there won't be any
8737          * host controller transaction expected till resume.
8738          */
8739         ufshcd_disable_irq(hba);
8740
8741         ufshcd_setup_clocks(hba, false);
8742
8743         if (ufshcd_is_clkgating_allowed(hba)) {
8744                 hba->clk_gating.state = CLKS_OFF;
8745                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8746                                         hba->clk_gating.state);
8747         }
8748
8749         /* Put the host controller in low power mode if possible */
8750         ufshcd_hba_vreg_set_lpm(hba);
8751         goto out;
8752
8753 set_link_active:
8754         if (hba->clk_scaling.is_allowed)
8755                 ufshcd_resume_clkscaling(hba);
8756         ufshcd_vreg_set_hpm(hba);
8757         /*
8758          * Device hardware reset is required to exit DeepSleep. Also, for
8759          * DeepSleep, the link is off so host reset and restore will be done
8760          * further below.
8761          */
8762         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8763                 ufshcd_device_reset(hba);
8764                 WARN_ON(!ufshcd_is_link_off(hba));
8765         }
8766         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8767                 ufshcd_set_link_active(hba);
8768         else if (ufshcd_is_link_off(hba))
8769                 ufshcd_host_reset_and_restore(hba);
8770 set_dev_active:
8771         /* Can also get here needing to exit DeepSleep */
8772         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8773                 ufshcd_device_reset(hba);
8774                 ufshcd_host_reset_and_restore(hba);
8775         }
8776         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8777                 ufshcd_disable_auto_bkops(hba);
8778 enable_gating:
8779         if (hba->clk_scaling.is_allowed)
8780                 ufshcd_resume_clkscaling(hba);
8781         hba->clk_gating.is_suspended = false;
8782         hba->dev_info.b_rpm_dev_flush_capable = false;
8783         ufshcd_release(hba);
8784 out:
8785         if (hba->dev_info.b_rpm_dev_flush_capable) {
8786                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8787                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8788         }
8789
8790         hba->pm_op_in_progress = 0;
8791
8792         if (ret)
8793                 ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
8794         return ret;
8795 }
8796
8797 /**
8798  * ufshcd_resume - helper function for resume operations
8799  * @hba: per adapter instance
8800  * @pm_op: runtime PM or system PM
8801  *
8802  * This function basically brings the UFS device, UniPro link and controller
8803  * to active state.
8804  *
8805  * Returns 0 for success and non-zero for failure
8806  */
8807 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8808 {
8809         int ret;
8810         enum uic_link_state old_link_state;
8811
8812         hba->pm_op_in_progress = 1;
8813         old_link_state = hba->uic_link_state;
8814
8815         ufshcd_hba_vreg_set_hpm(hba);
8816         /* Make sure clocks are enabled before accessing controller */
8817         ret = ufshcd_setup_clocks(hba, true);
8818         if (ret)
8819                 goto out;
8820
8821         /* enable the host irq as host controller would be active soon */
8822         ufshcd_enable_irq(hba);
8823
8824         ret = ufshcd_vreg_set_hpm(hba);
8825         if (ret)
8826                 goto disable_irq_and_vops_clks;
8827
8828         /*
8829          * Call vendor specific resume callback. As these callbacks may access
8830          * vendor specific host controller register space call them when the
8831          * host clocks are ON.
8832          */
8833         ret = ufshcd_vops_resume(hba, pm_op);
8834         if (ret)
8835                 goto disable_vreg;
8836
8837         /* For DeepSleep, the only supported option is to have the link off */
8838         WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
8839
8840         if (ufshcd_is_link_hibern8(hba)) {
8841                 ret = ufshcd_uic_hibern8_exit(hba);
8842                 if (!ret) {
8843                         ufshcd_set_link_active(hba);
8844                 } else {
8845                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8846                                         __func__, ret);
8847                         goto vendor_suspend;
8848                 }
8849         } else if (ufshcd_is_link_off(hba)) {
8850                 /*
8851                  * A full initialization of the host and the device is
8852                  * required since the link was put to off during suspend.
8853                  * Note, in the case of DeepSleep, the device will exit
8854                  * DeepSleep due to device reset.
8855                  */
8856                 ret = ufshcd_reset_and_restore(hba);
8857                 /*
8858                  * ufshcd_reset_and_restore() should have already
8859                  * set the link state as active
8860                  */
8861                 if (ret || !ufshcd_is_link_active(hba))
8862                         goto vendor_suspend;
8863         }
8864
8865         if (!ufshcd_is_ufs_dev_active(hba)) {
8866                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8867                 if (ret)
8868                         goto set_old_link_state;
8869         }
8870
8871         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8872                 ufshcd_enable_auto_bkops(hba);
8873         else
8874                 /*
8875                  * If BKOPs operations are urgently needed at this moment then
8876                  * keep auto-bkops enabled or else disable it.
8877                  */
8878                 ufshcd_urgent_bkops(hba);
8879
8880         hba->clk_gating.is_suspended = false;
8881
8882         if (hba->clk_scaling.is_allowed)
8883                 ufshcd_resume_clkscaling(hba);
8884
8885         /* Enable Auto-Hibernate if configured */
8886         ufshcd_auto_hibern8_enable(hba);
8887
8888         if (hba->dev_info.b_rpm_dev_flush_capable) {
8889                 hba->dev_info.b_rpm_dev_flush_capable = false;
8890                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8891         }
8892
8893         /* Schedule clock gating in case of no access to UFS device yet */
8894         ufshcd_release(hba);
8895
8896         goto out;
8897
8898 set_old_link_state:
8899         ufshcd_link_state_transition(hba, old_link_state, 0);
8900 vendor_suspend:
8901         ufshcd_vops_suspend(hba, pm_op);
8902 disable_vreg:
8903         ufshcd_vreg_set_lpm(hba);
8904 disable_irq_and_vops_clks:
8905         ufshcd_disable_irq(hba);
8906         if (hba->clk_scaling.is_allowed)
8907                 ufshcd_suspend_clkscaling(hba);
8908         ufshcd_setup_clocks(hba, false);
8909         if (ufshcd_is_clkgating_allowed(hba)) {
8910                 hba->clk_gating.state = CLKS_OFF;
8911                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8912                                         hba->clk_gating.state);
8913         }
8914 out:
8915         hba->pm_op_in_progress = 0;
8916         if (ret)
8917                 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
8918         return ret;
8919 }
8920
8921 /**
8922  * ufshcd_system_suspend - system suspend routine
8923  * @hba: per adapter instance
8924  *
8925  * Check the description of ufshcd_suspend() function for more details.
8926  *
8927  * Returns 0 for success and non-zero for failure
8928  */
8929 int ufshcd_system_suspend(struct ufs_hba *hba)
8930 {
8931         int ret = 0;
8932         ktime_t start = ktime_get();
8933
8934         down(&hba->eh_sem);
8935         if (!hba || !hba->is_powered)
8936                 return 0;
8937
8938         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8939              hba->curr_dev_pwr_mode) &&
8940             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8941              hba->uic_link_state))
8942                 goto out;
8943
8944         if (pm_runtime_suspended(hba->dev)) {
8945                 /*
8946                  * UFS device and/or UFS link low power states during runtime
8947                  * suspend seems to be different than what is expected during
8948                  * system suspend. Hence runtime resume the devic & link and
8949                  * let the system suspend low power states to take effect.
8950                  * TODO: If resume takes longer time, we might have optimize
8951                  * it in future by not resuming everything if possible.
8952                  */
8953                 ret = ufshcd_runtime_resume(hba);
8954                 if (ret)
8955                         goto out;
8956         }
8957
8958         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8959 out:
8960         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8961                 ktime_to_us(ktime_sub(ktime_get(), start)),
8962                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8963         if (!ret)
8964                 hba->is_sys_suspended = true;
8965         else
8966                 up(&hba->eh_sem);
8967         return ret;
8968 }
8969 EXPORT_SYMBOL(ufshcd_system_suspend);
8970
8971 /**
8972  * ufshcd_system_resume - system resume routine
8973  * @hba: per adapter instance
8974  *
8975  * Returns 0 for success and non-zero for failure
8976  */
8977
8978 int ufshcd_system_resume(struct ufs_hba *hba)
8979 {
8980         int ret = 0;
8981         ktime_t start = ktime_get();
8982
8983         if (!hba) {
8984                 up(&hba->eh_sem);
8985                 return -EINVAL;
8986         }
8987
8988         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8989                 /*
8990                  * Let the runtime resume take care of resuming
8991                  * if runtime suspended.
8992                  */
8993                 goto out;
8994         else
8995                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8996 out:
8997         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8998                 ktime_to_us(ktime_sub(ktime_get(), start)),
8999                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9000         if (!ret)
9001                 hba->is_sys_suspended = false;
9002         up(&hba->eh_sem);
9003         return ret;
9004 }
9005 EXPORT_SYMBOL(ufshcd_system_resume);
9006
9007 /**
9008  * ufshcd_runtime_suspend - runtime suspend routine
9009  * @hba: per adapter instance
9010  *
9011  * Check the description of ufshcd_suspend() function for more details.
9012  *
9013  * Returns 0 for success and non-zero for failure
9014  */
9015 int ufshcd_runtime_suspend(struct ufs_hba *hba)
9016 {
9017         int ret = 0;
9018         ktime_t start = ktime_get();
9019
9020         if (!hba)
9021                 return -EINVAL;
9022
9023         if (!hba->is_powered)
9024                 goto out;
9025         else
9026                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
9027 out:
9028         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9029                 ktime_to_us(ktime_sub(ktime_get(), start)),
9030                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9031         return ret;
9032 }
9033 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9034
9035 /**
9036  * ufshcd_runtime_resume - runtime resume routine
9037  * @hba: per adapter instance
9038  *
9039  * This function basically brings the UFS device, UniPro link and controller
9040  * to active state. Following operations are done in this function:
9041  *
9042  * 1. Turn on all the controller related clocks
9043  * 2. Bring the UniPro link out of Hibernate state
9044  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9045  *    to active state.
9046  * 4. If auto-bkops is enabled on the device, disable it.
9047  *
9048  * So following would be the possible power state after this function return
9049  * successfully:
9050  *      S1: UFS device in Active state with VCC rail ON
9051  *          UniPro link in Active state
9052  *          All the UFS/UniPro controller clocks are ON
9053  *
9054  * Returns 0 for success and non-zero for failure
9055  */
9056 int ufshcd_runtime_resume(struct ufs_hba *hba)
9057 {
9058         int ret = 0;
9059         ktime_t start = ktime_get();
9060
9061         if (!hba)
9062                 return -EINVAL;
9063
9064         if (!hba->is_powered)
9065                 goto out;
9066         else
9067                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9068 out:
9069         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9070                 ktime_to_us(ktime_sub(ktime_get(), start)),
9071                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9072         return ret;
9073 }
9074 EXPORT_SYMBOL(ufshcd_runtime_resume);
9075
9076 int ufshcd_runtime_idle(struct ufs_hba *hba)
9077 {
9078         return 0;
9079 }
9080 EXPORT_SYMBOL(ufshcd_runtime_idle);
9081
9082 /**
9083  * ufshcd_shutdown - shutdown routine
9084  * @hba: per adapter instance
9085  *
9086  * This function would power off both UFS device and UFS link.
9087  *
9088  * Returns 0 always to allow force shutdown even in case of errors.
9089  */
9090 int ufshcd_shutdown(struct ufs_hba *hba)
9091 {
9092         int ret = 0;
9093
9094         down(&hba->eh_sem);
9095         if (!hba->is_powered)
9096                 goto out;
9097
9098         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9099                 goto out;
9100
9101         pm_runtime_get_sync(hba->dev);
9102
9103         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9104 out:
9105         if (ret)
9106                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9107         hba->is_powered = false;
9108         up(&hba->eh_sem);
9109         /* allow force shutdown even in case of errors */
9110         return 0;
9111 }
9112 EXPORT_SYMBOL(ufshcd_shutdown);
9113
9114 /**
9115  * ufshcd_remove - de-allocate SCSI host and host memory space
9116  *              data structure memory
9117  * @hba: per adapter instance
9118  */
9119 void ufshcd_remove(struct ufs_hba *hba)
9120 {
9121         ufs_bsg_remove(hba);
9122         ufs_sysfs_remove_nodes(hba->dev);
9123         blk_cleanup_queue(hba->tmf_queue);
9124         blk_mq_free_tag_set(&hba->tmf_tag_set);
9125         blk_cleanup_queue(hba->cmd_queue);
9126         scsi_remove_host(hba->host);
9127         destroy_workqueue(hba->eh_wq);
9128         /* disable interrupts */
9129         ufshcd_disable_intr(hba, hba->intr_mask);
9130         ufshcd_hba_stop(hba);
9131
9132         ufshcd_exit_clk_scaling(hba);
9133         ufshcd_exit_clk_gating(hba);
9134         if (ufshcd_is_clkscaling_supported(hba))
9135                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9136         ufshcd_hba_exit(hba);
9137 }
9138 EXPORT_SYMBOL_GPL(ufshcd_remove);
9139
9140 /**
9141  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9142  * @hba: pointer to Host Bus Adapter (HBA)
9143  */
9144 void ufshcd_dealloc_host(struct ufs_hba *hba)
9145 {
9146         ufshcd_crypto_destroy_keyslot_manager(hba);
9147         scsi_host_put(hba->host);
9148 }
9149 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9150
9151 /**
9152  * ufshcd_set_dma_mask - Set dma mask based on the controller
9153  *                       addressing capability
9154  * @hba: per adapter instance
9155  *
9156  * Returns 0 for success, non-zero for failure
9157  */
9158 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9159 {
9160         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9161                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9162                         return 0;
9163         }
9164         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9165 }
9166
9167 /**
9168  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9169  * @dev: pointer to device handle
9170  * @hba_handle: driver private handle
9171  * Returns 0 on success, non-zero value on failure
9172  */
9173 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9174 {
9175         struct Scsi_Host *host;
9176         struct ufs_hba *hba;
9177         int err = 0;
9178
9179         if (!dev) {
9180                 dev_err(dev,
9181                 "Invalid memory reference for dev is NULL\n");
9182                 err = -ENODEV;
9183                 goto out_error;
9184         }
9185
9186         host = scsi_host_alloc(&ufshcd_driver_template,
9187                                 sizeof(struct ufs_hba));
9188         if (!host) {
9189                 dev_err(dev, "scsi_host_alloc failed\n");
9190                 err = -ENOMEM;
9191                 goto out_error;
9192         }
9193         hba = shost_priv(host);
9194         hba->host = host;
9195         hba->dev = dev;
9196         *hba_handle = hba;
9197         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9198
9199         INIT_LIST_HEAD(&hba->clk_list_head);
9200
9201 out_error:
9202         return err;
9203 }
9204 EXPORT_SYMBOL(ufshcd_alloc_host);
9205
9206 /* This function exists because blk_mq_alloc_tag_set() requires this. */
9207 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9208                                      const struct blk_mq_queue_data *qd)
9209 {
9210         WARN_ON_ONCE(true);
9211         return BLK_STS_NOTSUPP;
9212 }
9213
9214 static const struct blk_mq_ops ufshcd_tmf_ops = {
9215         .queue_rq = ufshcd_queue_tmf,
9216 };
9217
9218 /**
9219  * ufshcd_init - Driver initialization routine
9220  * @hba: per-adapter instance
9221  * @mmio_base: base register address
9222  * @irq: Interrupt line of device
9223  * Returns 0 on success, non-zero value on failure
9224  */
9225 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9226 {
9227         int err;
9228         struct Scsi_Host *host = hba->host;
9229         struct device *dev = hba->dev;
9230         char eh_wq_name[sizeof("ufs_eh_wq_00")];
9231
9232         if (!mmio_base) {
9233                 dev_err(hba->dev,
9234                 "Invalid memory reference for mmio_base is NULL\n");
9235                 err = -ENODEV;
9236                 goto out_error;
9237         }
9238
9239         hba->mmio_base = mmio_base;
9240         hba->irq = irq;
9241         hba->vps = &ufs_hba_vps;
9242
9243         err = ufshcd_hba_init(hba);
9244         if (err)
9245                 goto out_error;
9246
9247         /* Read capabilities registers */
9248         err = ufshcd_hba_capabilities(hba);
9249         if (err)
9250                 goto out_disable;
9251
9252         /* Get UFS version supported by the controller */
9253         hba->ufs_version = ufshcd_get_ufs_version(hba);
9254
9255         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
9256             (hba->ufs_version != UFSHCI_VERSION_11) &&
9257             (hba->ufs_version != UFSHCI_VERSION_20) &&
9258             (hba->ufs_version != UFSHCI_VERSION_21))
9259                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
9260                         hba->ufs_version);
9261
9262         /* Get Interrupt bit mask per version */
9263         hba->intr_mask = ufshcd_get_intr_mask(hba);
9264
9265         err = ufshcd_set_dma_mask(hba);
9266         if (err) {
9267                 dev_err(hba->dev, "set dma mask failed\n");
9268                 goto out_disable;
9269         }
9270
9271         /* Allocate memory for host memory space */
9272         err = ufshcd_memory_alloc(hba);
9273         if (err) {
9274                 dev_err(hba->dev, "Memory allocation failed\n");
9275                 goto out_disable;
9276         }
9277
9278         /* Configure LRB */
9279         ufshcd_host_memory_configure(hba);
9280
9281         host->can_queue = hba->nutrs;
9282         host->cmd_per_lun = hba->nutrs;
9283         host->max_id = UFSHCD_MAX_ID;
9284         host->max_lun = UFS_MAX_LUNS;
9285         host->max_channel = UFSHCD_MAX_CHANNEL;
9286         host->unique_id = host->host_no;
9287         host->max_cmd_len = UFS_CDB_SIZE;
9288
9289         hba->max_pwr_info.is_valid = false;
9290
9291         /* Initialize work queues */
9292         snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9293                  hba->host->host_no);
9294         hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9295         if (!hba->eh_wq) {
9296                 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9297                                 __func__);
9298                 err = -ENOMEM;
9299                 goto out_disable;
9300         }
9301         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9302         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9303
9304         sema_init(&hba->eh_sem, 1);
9305
9306         /* Initialize UIC command mutex */
9307         mutex_init(&hba->uic_cmd_mutex);
9308
9309         /* Initialize mutex for device management commands */
9310         mutex_init(&hba->dev_cmd.lock);
9311
9312         init_rwsem(&hba->clk_scaling_lock);
9313
9314         ufshcd_init_clk_gating(hba);
9315
9316         ufshcd_init_clk_scaling(hba);
9317
9318         /*
9319          * In order to avoid any spurious interrupt immediately after
9320          * registering UFS controller interrupt handler, clear any pending UFS
9321          * interrupt status and disable all the UFS interrupts.
9322          */
9323         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9324                       REG_INTERRUPT_STATUS);
9325         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9326         /*
9327          * Make sure that UFS interrupts are disabled and any pending interrupt
9328          * status is cleared before registering UFS interrupt handler.
9329          */
9330         mb();
9331
9332         /* IRQ registration */
9333         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
9334         if (err) {
9335                 dev_err(hba->dev, "request irq failed\n");
9336                 goto exit_gating;
9337         } else {
9338                 hba->is_irq_enabled = true;
9339         }
9340
9341         err = scsi_add_host(host, hba->dev);
9342         if (err) {
9343                 dev_err(hba->dev, "scsi_add_host failed\n");
9344                 goto exit_gating;
9345         }
9346
9347         hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9348         if (IS_ERR(hba->cmd_queue)) {
9349                 err = PTR_ERR(hba->cmd_queue);
9350                 goto out_remove_scsi_host;
9351         }
9352
9353         hba->tmf_tag_set = (struct blk_mq_tag_set) {
9354                 .nr_hw_queues   = 1,
9355                 .queue_depth    = hba->nutmrs,
9356                 .ops            = &ufshcd_tmf_ops,
9357                 .flags          = BLK_MQ_F_NO_SCHED,
9358         };
9359         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9360         if (err < 0)
9361                 goto free_cmd_queue;
9362         hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9363         if (IS_ERR(hba->tmf_queue)) {
9364                 err = PTR_ERR(hba->tmf_queue);
9365                 goto free_tmf_tag_set;
9366         }
9367
9368         /* Reset the attached device */
9369         ufshcd_device_reset(hba);
9370
9371         ufshcd_init_crypto(hba);
9372
9373         /* Host controller enable */
9374         err = ufshcd_hba_enable(hba);
9375         if (err) {
9376                 dev_err(hba->dev, "Host controller enable failed\n");
9377                 ufshcd_print_evt_hist(hba);
9378                 ufshcd_print_host_state(hba);
9379                 goto free_tmf_queue;
9380         }
9381
9382         /*
9383          * Set the default power management level for runtime and system PM.
9384          * Default power saving mode is to keep UFS link in Hibern8 state
9385          * and UFS device in sleep state.
9386          */
9387         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9388                                                 UFS_SLEEP_PWR_MODE,
9389                                                 UIC_LINK_HIBERN8_STATE);
9390         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9391                                                 UFS_SLEEP_PWR_MODE,
9392                                                 UIC_LINK_HIBERN8_STATE);
9393
9394         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9395                           ufshcd_rpm_dev_flush_recheck_work);
9396
9397         /* Set the default auto-hiberate idle timer value to 150 ms */
9398         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9399                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9400                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9401         }
9402
9403         /* Hold auto suspend until async scan completes */
9404         pm_runtime_get_sync(dev);
9405         atomic_set(&hba->scsi_block_reqs_cnt, 0);
9406         /*
9407          * We are assuming that device wasn't put in sleep/power-down
9408          * state exclusively during the boot stage before kernel.
9409          * This assumption helps avoid doing link startup twice during
9410          * ufshcd_probe_hba().
9411          */
9412         ufshcd_set_ufs_dev_active(hba);
9413
9414         async_schedule(ufshcd_async_scan, hba);
9415         ufs_sysfs_add_nodes(hba->dev);
9416
9417         return 0;
9418
9419 free_tmf_queue:
9420         blk_cleanup_queue(hba->tmf_queue);
9421 free_tmf_tag_set:
9422         blk_mq_free_tag_set(&hba->tmf_tag_set);
9423 free_cmd_queue:
9424         blk_cleanup_queue(hba->cmd_queue);
9425 out_remove_scsi_host:
9426         scsi_remove_host(hba->host);
9427 exit_gating:
9428         ufshcd_exit_clk_scaling(hba);
9429         ufshcd_exit_clk_gating(hba);
9430         destroy_workqueue(hba->eh_wq);
9431 out_disable:
9432         hba->is_irq_enabled = false;
9433         ufshcd_hba_exit(hba);
9434 out_error:
9435         return err;
9436 }
9437 EXPORT_SYMBOL_GPL(ufshcd_init);
9438
9439 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
9440 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
9441 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
9442 MODULE_LICENSE("GPL");
9443 MODULE_VERSION(UFSHCD_DRIVER_VERSION);