c1c401b2b69d06bf8b6634bdd17715c093f9aef9
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufshcd.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller driver Core
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include "ufshcd.h"
20 #include "ufs_quirks.h"
21 #include "unipro.h"
22 #include "ufs-sysfs.h"
23 #include "ufs_bsg.h"
24 #include "ufshcd-crypto.h"
25 #include <asm/unaligned.h>
26 #include <linux/blkdev.h>
27
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/ufs.h>
30
31 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
32                                  UTP_TASK_REQ_COMPL |\
33                                  UFSHCD_ERROR_MASK)
34 /* UIC command timeout, unit: ms */
35 #define UIC_CMD_TIMEOUT 500
36
37 /* NOP OUT retries waiting for NOP IN response */
38 #define NOP_OUT_RETRIES    10
39 /* Timeout after 50 msecs if NOP OUT hangs without response */
40 #define NOP_OUT_TIMEOUT    50 /* msecs */
41
42 /* Query request retries */
43 #define QUERY_REQ_RETRIES 3
44 /* Query request timeout */
45 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
46
47 /* Task management command timeout */
48 #define TM_CMD_TIMEOUT  100 /* msecs */
49
50 /* maximum number of retries for a general UIC command  */
51 #define UFS_UIC_COMMAND_RETRIES 3
52
53 /* maximum number of link-startup retries */
54 #define DME_LINKSTARTUP_RETRIES 3
55
56 /* Maximum retries for Hibern8 enter */
57 #define UIC_HIBERN8_ENTER_RETRIES 3
58
59 /* maximum number of reset retries before giving up */
60 #define MAX_HOST_RESET_RETRIES 5
61
62 /* Expose the flag value from utp_upiu_query.value */
63 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
64
65 /* Interrupt aggregation default timeout, unit: 40us */
66 #define INT_AGGR_DEF_TO 0x02
67
68 /* default delay of autosuspend: 2000 ms */
69 #define RPM_AUTOSUSPEND_DELAY_MS 2000
70
71 /* Default delay of RPM device flush delayed work */
72 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
73
74 /* Default value of wait time before gating device ref clock */
75 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
76
77 /* Polling time to wait for fDeviceInit */
78 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
79
80 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
81         ({                                                              \
82                 int _ret;                                               \
83                 if (_on)                                                \
84                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
85                 else                                                    \
86                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
87                 _ret;                                                   \
88         })
89
90 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
91         size_t __len = (len);                                            \
92         print_hex_dump(KERN_ERR, prefix_str,                             \
93                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
94                        16, 4, buf, __len, false);                        \
95 } while (0)
96
97 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
98                      const char *prefix)
99 {
100         u32 *regs;
101         size_t pos;
102
103         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
104                 return -EINVAL;
105
106         regs = kzalloc(len, GFP_ATOMIC);
107         if (!regs)
108                 return -ENOMEM;
109
110         for (pos = 0; pos < len; pos += 4)
111                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
112
113         ufshcd_hex_dump(prefix, regs, len);
114         kfree(regs);
115
116         return 0;
117 }
118 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
119
120 enum {
121         UFSHCD_MAX_CHANNEL      = 0,
122         UFSHCD_MAX_ID           = 1,
123         UFSHCD_CMD_PER_LUN      = 32,
124         UFSHCD_CAN_QUEUE        = 32,
125 };
126
127 /* UFSHCD states */
128 enum {
129         UFSHCD_STATE_RESET,
130         UFSHCD_STATE_ERROR,
131         UFSHCD_STATE_OPERATIONAL,
132         UFSHCD_STATE_EH_SCHEDULED_FATAL,
133         UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
134 };
135
136 /* UFSHCD error handling flags */
137 enum {
138         UFSHCD_EH_IN_PROGRESS = (1 << 0),
139 };
140
141 /* UFSHCD UIC layer error flags */
142 enum {
143         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
144         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
145         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
146         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
147         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
148         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
149         UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
150 };
151
152 #define ufshcd_set_eh_in_progress(h) \
153         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
154 #define ufshcd_eh_in_progress(h) \
155         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
156 #define ufshcd_clear_eh_in_progress(h) \
157         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
158
159 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
160         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
161         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
162         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
163         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
164         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
165         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
166         /*
167          * For DeepSleep, the link is first put in hibern8 and then off.
168          * Leaving the link in hibern8 is not supported.
169          */
170         {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
171 };
172
173 static inline enum ufs_dev_pwr_mode
174 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
175 {
176         return ufs_pm_lvl_states[lvl].dev_state;
177 }
178
179 static inline enum uic_link_state
180 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
181 {
182         return ufs_pm_lvl_states[lvl].link_state;
183 }
184
185 static inline enum ufs_pm_level
186 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
187                                         enum uic_link_state link_state)
188 {
189         enum ufs_pm_level lvl;
190
191         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
192                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
193                         (ufs_pm_lvl_states[lvl].link_state == link_state))
194                         return lvl;
195         }
196
197         /* if no match found, return the level 0 */
198         return UFS_PM_LVL_0;
199 }
200
201 static struct ufs_dev_fix ufs_fixups[] = {
202         /* UFS cards deviations table */
203         UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
204                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
205         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
206                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
207                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
208                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
209         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
210                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
211         UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
212                 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
213         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
214                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
215         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
216                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
217         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
218                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
219         END_FIX
220 };
221
222 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
223 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
224 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
225 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
226 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
227 static void ufshcd_hba_exit(struct ufs_hba *hba);
228 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
229 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
230 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
231 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
232 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
233 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
234 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
235 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
236 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
237 static irqreturn_t ufshcd_intr(int irq, void *__hba);
238 static int ufshcd_change_power_mode(struct ufs_hba *hba,
239                              struct ufs_pa_layer_attr *pwr_mode);
240 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
241 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
242 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
243 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
244                                          struct ufs_vreg *vreg);
245 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
246 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
247 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
248 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
249 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
250 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
251 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
252 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
253
254 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
255 {
256         return tag >= 0 && tag < hba->nutrs;
257 }
258
259 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
260 {
261         if (!hba->is_irq_enabled) {
262                 enable_irq(hba->irq);
263                 hba->is_irq_enabled = true;
264         }
265 }
266
267 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
268 {
269         if (hba->is_irq_enabled) {
270                 disable_irq(hba->irq);
271                 hba->is_irq_enabled = false;
272         }
273 }
274
275 static inline void ufshcd_wb_config(struct ufs_hba *hba)
276 {
277         int ret;
278
279         if (!ufshcd_is_wb_allowed(hba))
280                 return;
281
282         ret = ufshcd_wb_ctrl(hba, true);
283         if (ret)
284                 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
285         else
286                 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
287         ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
288         if (ret)
289                 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
290                         __func__, ret);
291         ufshcd_wb_toggle_flush(hba, true);
292 }
293
294 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
295 {
296         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
297                 scsi_unblock_requests(hba->host);
298 }
299
300 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
301 {
302         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
303                 scsi_block_requests(hba->host);
304 }
305
306 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
307                 const char *str)
308 {
309         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
310
311         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
312 }
313
314 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
315                 const char *str)
316 {
317         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
318
319         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
320 }
321
322 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
323                 const char *str)
324 {
325         int off = (int)tag - hba->nutrs;
326         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
327
328         trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
329                         &descp->input_param1);
330 }
331
332 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
333                                          struct uic_command *ucmd,
334                                          const char *str)
335 {
336         u32 cmd;
337
338         if (!trace_ufshcd_uic_command_enabled())
339                 return;
340
341         if (!strcmp(str, "send"))
342                 cmd = ucmd->command;
343         else
344                 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
345
346         trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
347                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
348                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
349                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
350 }
351
352 static void ufshcd_add_command_trace(struct ufs_hba *hba,
353                 unsigned int tag, const char *str)
354 {
355         sector_t lba = -1;
356         u8 opcode = 0, group_id = 0;
357         u32 intr, doorbell;
358         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
359         struct scsi_cmnd *cmd = lrbp->cmd;
360         int transfer_len = -1;
361
362         if (!trace_ufshcd_command_enabled()) {
363                 /* trace UPIU W/O tracing command */
364                 if (cmd)
365                         ufshcd_add_cmd_upiu_trace(hba, tag, str);
366                 return;
367         }
368
369         if (cmd) { /* data phase exists */
370                 /* trace UPIU also */
371                 ufshcd_add_cmd_upiu_trace(hba, tag, str);
372                 opcode = cmd->cmnd[0];
373                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
374                         /*
375                          * Currently we only fully trace read(10) and write(10)
376                          * commands
377                          */
378                         if (cmd->request && cmd->request->bio)
379                                 lba = cmd->request->bio->bi_iter.bi_sector;
380                         transfer_len = be32_to_cpu(
381                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
382                         if (opcode == WRITE_10)
383                                 group_id = lrbp->cmd->cmnd[6];
384                 } else if (opcode == UNMAP) {
385                         if (cmd->request) {
386                                 lba = scsi_get_lba(cmd);
387                                 transfer_len = blk_rq_bytes(cmd->request);
388                         }
389                 }
390         }
391
392         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
393         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
394         trace_ufshcd_command(dev_name(hba->dev), str, tag,
395                         doorbell, transfer_len, intr, lba, opcode, group_id);
396 }
397
398 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
399 {
400         struct ufs_clk_info *clki;
401         struct list_head *head = &hba->clk_list_head;
402
403         if (list_empty(head))
404                 return;
405
406         list_for_each_entry(clki, head, list) {
407                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
408                                 clki->max_freq)
409                         dev_err(hba->dev, "clk: %s, rate: %u\n",
410                                         clki->name, clki->curr_freq);
411         }
412 }
413
414 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
415                              char *err_name)
416 {
417         int i;
418         bool found = false;
419         struct ufs_event_hist *e;
420
421         if (id >= UFS_EVT_CNT)
422                 return;
423
424         e = &hba->ufs_stats.event[id];
425
426         for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
427                 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
428
429                 if (e->tstamp[p] == 0)
430                         continue;
431                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
432                         e->val[p], ktime_to_us(e->tstamp[p]));
433                 found = true;
434         }
435
436         if (!found)
437                 dev_err(hba->dev, "No record of %s\n", err_name);
438 }
439
440 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
441 {
442         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
443
444         ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
445         ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
446         ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
447         ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
448         ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
449         ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
450                          "auto_hibern8_err");
451         ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
452         ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
453                          "link_startup_fail");
454         ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
455         ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
456                          "suspend_fail");
457         ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
458         ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
459         ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
460
461         ufshcd_vops_dbg_register_dump(hba);
462 }
463
464 static
465 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
466 {
467         struct ufshcd_lrb *lrbp;
468         int prdt_length;
469         int tag;
470
471         for_each_set_bit(tag, &bitmap, hba->nutrs) {
472                 lrbp = &hba->lrb[tag];
473
474                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
475                                 tag, ktime_to_us(lrbp->issue_time_stamp));
476                 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
477                                 tag, ktime_to_us(lrbp->compl_time_stamp));
478                 dev_err(hba->dev,
479                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
480                         tag, (u64)lrbp->utrd_dma_addr);
481
482                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
483                                 sizeof(struct utp_transfer_req_desc));
484                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
485                         (u64)lrbp->ucd_req_dma_addr);
486                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
487                                 sizeof(struct utp_upiu_req));
488                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
489                         (u64)lrbp->ucd_rsp_dma_addr);
490                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
491                                 sizeof(struct utp_upiu_rsp));
492
493                 prdt_length = le16_to_cpu(
494                         lrbp->utr_descriptor_ptr->prd_table_length);
495                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
496                         prdt_length /= sizeof(struct ufshcd_sg_entry);
497
498                 dev_err(hba->dev,
499                         "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
500                         tag, prdt_length,
501                         (u64)lrbp->ucd_prdt_dma_addr);
502
503                 if (pr_prdt)
504                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
505                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
506         }
507 }
508
509 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
510 {
511         int tag;
512
513         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
514                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
515
516                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
517                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
518         }
519 }
520
521 static void ufshcd_print_host_state(struct ufs_hba *hba)
522 {
523         struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
524
525         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
526         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
527                 hba->outstanding_reqs, hba->outstanding_tasks);
528         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
529                 hba->saved_err, hba->saved_uic_err);
530         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
531                 hba->curr_dev_pwr_mode, hba->uic_link_state);
532         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
533                 hba->pm_op_in_progress, hba->is_sys_suspended);
534         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
535                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
536         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
537         dev_err(hba->dev,
538                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
539                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
540                 hba->ufs_stats.hibern8_exit_cnt);
541         dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
542                 ktime_to_us(hba->ufs_stats.last_intr_ts),
543                 hba->ufs_stats.last_intr_status);
544         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
545                 hba->eh_flags, hba->req_abort_count);
546         dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
547                 hba->ufs_version, hba->capabilities, hba->caps);
548         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
549                 hba->dev_quirks);
550         if (sdev_ufs)
551                 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
552                         sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
553
554         ufshcd_print_clk_freqs(hba);
555 }
556
557 /**
558  * ufshcd_print_pwr_info - print power params as saved in hba
559  * power info
560  * @hba: per-adapter instance
561  */
562 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
563 {
564         static const char * const names[] = {
565                 "INVALID MODE",
566                 "FAST MODE",
567                 "SLOW_MODE",
568                 "INVALID MODE",
569                 "FASTAUTO_MODE",
570                 "SLOWAUTO_MODE",
571                 "INVALID MODE",
572         };
573
574         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
575                  __func__,
576                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
577                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
578                  names[hba->pwr_info.pwr_rx],
579                  names[hba->pwr_info.pwr_tx],
580                  hba->pwr_info.hs_rate);
581 }
582
583 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
584 {
585         if (!us)
586                 return;
587
588         if (us < 10)
589                 udelay(us);
590         else
591                 usleep_range(us, us + tolerance);
592 }
593 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
594
595 /**
596  * ufshcd_wait_for_register - wait for register value to change
597  * @hba: per-adapter interface
598  * @reg: mmio register offset
599  * @mask: mask to apply to the read register value
600  * @val: value to wait for
601  * @interval_us: polling interval in microseconds
602  * @timeout_ms: timeout in milliseconds
603  *
604  * Return:
605  * -ETIMEDOUT on error, zero on success.
606  */
607 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
608                                 u32 val, unsigned long interval_us,
609                                 unsigned long timeout_ms)
610 {
611         int err = 0;
612         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
613
614         /* ignore bits that we don't intend to wait on */
615         val = val & mask;
616
617         while ((ufshcd_readl(hba, reg) & mask) != val) {
618                 usleep_range(interval_us, interval_us + 50);
619                 if (time_after(jiffies, timeout)) {
620                         if ((ufshcd_readl(hba, reg) & mask) != val)
621                                 err = -ETIMEDOUT;
622                         break;
623                 }
624         }
625
626         return err;
627 }
628
629 /**
630  * ufshcd_get_intr_mask - Get the interrupt bit mask
631  * @hba: Pointer to adapter instance
632  *
633  * Returns interrupt bit mask per version
634  */
635 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
636 {
637         u32 intr_mask = 0;
638
639         switch (hba->ufs_version) {
640         case UFSHCI_VERSION_10:
641                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
642                 break;
643         case UFSHCI_VERSION_11:
644         case UFSHCI_VERSION_20:
645                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
646                 break;
647         case UFSHCI_VERSION_21:
648         default:
649                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
650                 break;
651         }
652
653         return intr_mask;
654 }
655
656 /**
657  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
658  * @hba: Pointer to adapter instance
659  *
660  * Returns UFSHCI version supported by the controller
661  */
662 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
663 {
664         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
665                 return ufshcd_vops_get_ufs_hci_version(hba);
666
667         return ufshcd_readl(hba, REG_UFS_VERSION);
668 }
669
670 /**
671  * ufshcd_is_device_present - Check if any device connected to
672  *                            the host controller
673  * @hba: pointer to adapter instance
674  *
675  * Returns true if device present, false if no device detected
676  */
677 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
678 {
679         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
680                                                 DEVICE_PRESENT) ? true : false;
681 }
682
683 /**
684  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
685  * @lrbp: pointer to local command reference block
686  *
687  * This function is used to get the OCS field from UTRD
688  * Returns the OCS field in the UTRD
689  */
690 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
691 {
692         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
693 }
694
695 /**
696  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
697  * @hba: per adapter instance
698  * @pos: position of the bit to be cleared
699  */
700 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
701 {
702         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
703                 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
704         else
705                 ufshcd_writel(hba, ~(1 << pos),
706                                 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
707 }
708
709 /**
710  * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
711  * @hba: per adapter instance
712  * @pos: position of the bit to be cleared
713  */
714 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
715 {
716         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
717                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
718         else
719                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
720 }
721
722 /**
723  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
724  * @hba: per adapter instance
725  * @tag: position of the bit to be cleared
726  */
727 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
728 {
729         __clear_bit(tag, &hba->outstanding_reqs);
730 }
731
732 /**
733  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
734  * @reg: Register value of host controller status
735  *
736  * Returns integer, 0 on Success and positive value if failed
737  */
738 static inline int ufshcd_get_lists_status(u32 reg)
739 {
740         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
741 }
742
743 /**
744  * ufshcd_get_uic_cmd_result - Get the UIC command result
745  * @hba: Pointer to adapter instance
746  *
747  * This function gets the result of UIC command completion
748  * Returns 0 on success, non zero value on error
749  */
750 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
751 {
752         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
753                MASK_UIC_COMMAND_RESULT;
754 }
755
756 /**
757  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
758  * @hba: Pointer to adapter instance
759  *
760  * This function gets UIC command argument3
761  * Returns 0 on success, non zero value on error
762  */
763 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
764 {
765         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
766 }
767
768 /**
769  * ufshcd_get_req_rsp - returns the TR response transaction type
770  * @ucd_rsp_ptr: pointer to response UPIU
771  */
772 static inline int
773 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
774 {
775         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
776 }
777
778 /**
779  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
780  * @ucd_rsp_ptr: pointer to response UPIU
781  *
782  * This function gets the response status and scsi_status from response UPIU
783  * Returns the response result code.
784  */
785 static inline int
786 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
787 {
788         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
789 }
790
791 /*
792  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
793  *                              from response UPIU
794  * @ucd_rsp_ptr: pointer to response UPIU
795  *
796  * Return the data segment length.
797  */
798 static inline unsigned int
799 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
800 {
801         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
802                 MASK_RSP_UPIU_DATA_SEG_LEN;
803 }
804
805 /**
806  * ufshcd_is_exception_event - Check if the device raised an exception event
807  * @ucd_rsp_ptr: pointer to response UPIU
808  *
809  * The function checks if the device raised an exception event indicated in
810  * the Device Information field of response UPIU.
811  *
812  * Returns true if exception is raised, false otherwise.
813  */
814 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
815 {
816         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
817                         MASK_RSP_EXCEPTION_EVENT ? true : false;
818 }
819
820 /**
821  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
822  * @hba: per adapter instance
823  */
824 static inline void
825 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
826 {
827         ufshcd_writel(hba, INT_AGGR_ENABLE |
828                       INT_AGGR_COUNTER_AND_TIMER_RESET,
829                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
830 }
831
832 /**
833  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
834  * @hba: per adapter instance
835  * @cnt: Interrupt aggregation counter threshold
836  * @tmout: Interrupt aggregation timeout value
837  */
838 static inline void
839 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
840 {
841         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
842                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
843                       INT_AGGR_TIMEOUT_VAL(tmout),
844                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
845 }
846
847 /**
848  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
849  * @hba: per adapter instance
850  */
851 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
852 {
853         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
854 }
855
856 /**
857  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
858  *                      When run-stop registers are set to 1, it indicates the
859  *                      host controller that it can process the requests
860  * @hba: per adapter instance
861  */
862 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
863 {
864         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
865                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
866         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
867                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
868 }
869
870 /**
871  * ufshcd_hba_start - Start controller initialization sequence
872  * @hba: per adapter instance
873  */
874 static inline void ufshcd_hba_start(struct ufs_hba *hba)
875 {
876         u32 val = CONTROLLER_ENABLE;
877
878         if (ufshcd_crypto_enable(hba))
879                 val |= CRYPTO_GENERAL_ENABLE;
880
881         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
882 }
883
884 /**
885  * ufshcd_is_hba_active - Get controller state
886  * @hba: per adapter instance
887  *
888  * Returns false if controller is active, true otherwise
889  */
890 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
891 {
892         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
893                 ? false : true;
894 }
895
896 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
897 {
898         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
899         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
900             (hba->ufs_version == UFSHCI_VERSION_11))
901                 return UFS_UNIPRO_VER_1_41;
902         else
903                 return UFS_UNIPRO_VER_1_6;
904 }
905 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
906
907 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
908 {
909         /*
910          * If both host and device support UniPro ver1.6 or later, PA layer
911          * parameters tuning happens during link startup itself.
912          *
913          * We can manually tune PA layer parameters if either host or device
914          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
915          * logic simple, we will only do manual tuning if local unipro version
916          * doesn't support ver1.6 or later.
917          */
918         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
919                 return true;
920         else
921                 return false;
922 }
923
924 /**
925  * ufshcd_set_clk_freq - set UFS controller clock frequencies
926  * @hba: per adapter instance
927  * @scale_up: If True, set max possible frequency othewise set low frequency
928  *
929  * Returns 0 if successful
930  * Returns < 0 for any other errors
931  */
932 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
933 {
934         int ret = 0;
935         struct ufs_clk_info *clki;
936         struct list_head *head = &hba->clk_list_head;
937
938         if (list_empty(head))
939                 goto out;
940
941         list_for_each_entry(clki, head, list) {
942                 if (!IS_ERR_OR_NULL(clki->clk)) {
943                         if (scale_up && clki->max_freq) {
944                                 if (clki->curr_freq == clki->max_freq)
945                                         continue;
946
947                                 ret = clk_set_rate(clki->clk, clki->max_freq);
948                                 if (ret) {
949                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
950                                                 __func__, clki->name,
951                                                 clki->max_freq, ret);
952                                         break;
953                                 }
954                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
955                                                 "scaled up", clki->name,
956                                                 clki->curr_freq,
957                                                 clki->max_freq);
958
959                                 clki->curr_freq = clki->max_freq;
960
961                         } else if (!scale_up && clki->min_freq) {
962                                 if (clki->curr_freq == clki->min_freq)
963                                         continue;
964
965                                 ret = clk_set_rate(clki->clk, clki->min_freq);
966                                 if (ret) {
967                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
968                                                 __func__, clki->name,
969                                                 clki->min_freq, ret);
970                                         break;
971                                 }
972                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
973                                                 "scaled down", clki->name,
974                                                 clki->curr_freq,
975                                                 clki->min_freq);
976                                 clki->curr_freq = clki->min_freq;
977                         }
978                 }
979                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
980                                 clki->name, clk_get_rate(clki->clk));
981         }
982
983 out:
984         return ret;
985 }
986
987 /**
988  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
989  * @hba: per adapter instance
990  * @scale_up: True if scaling up and false if scaling down
991  *
992  * Returns 0 if successful
993  * Returns < 0 for any other errors
994  */
995 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
996 {
997         int ret = 0;
998         ktime_t start = ktime_get();
999
1000         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1001         if (ret)
1002                 goto out;
1003
1004         ret = ufshcd_set_clk_freq(hba, scale_up);
1005         if (ret)
1006                 goto out;
1007
1008         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1009         if (ret)
1010                 ufshcd_set_clk_freq(hba, !scale_up);
1011
1012 out:
1013         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1014                         (scale_up ? "up" : "down"),
1015                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1016         return ret;
1017 }
1018
1019 /**
1020  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1021  * @hba: per adapter instance
1022  * @scale_up: True if scaling up and false if scaling down
1023  *
1024  * Returns true if scaling is required, false otherwise.
1025  */
1026 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1027                                                bool scale_up)
1028 {
1029         struct ufs_clk_info *clki;
1030         struct list_head *head = &hba->clk_list_head;
1031
1032         if (list_empty(head))
1033                 return false;
1034
1035         list_for_each_entry(clki, head, list) {
1036                 if (!IS_ERR_OR_NULL(clki->clk)) {
1037                         if (scale_up && clki->max_freq) {
1038                                 if (clki->curr_freq == clki->max_freq)
1039                                         continue;
1040                                 return true;
1041                         } else if (!scale_up && clki->min_freq) {
1042                                 if (clki->curr_freq == clki->min_freq)
1043                                         continue;
1044                                 return true;
1045                         }
1046                 }
1047         }
1048
1049         return false;
1050 }
1051
1052 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1053                                         u64 wait_timeout_us)
1054 {
1055         unsigned long flags;
1056         int ret = 0;
1057         u32 tm_doorbell;
1058         u32 tr_doorbell;
1059         bool timeout = false, do_last_check = false;
1060         ktime_t start;
1061
1062         ufshcd_hold(hba, false);
1063         spin_lock_irqsave(hba->host->host_lock, flags);
1064         /*
1065          * Wait for all the outstanding tasks/transfer requests.
1066          * Verify by checking the doorbell registers are clear.
1067          */
1068         start = ktime_get();
1069         do {
1070                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1071                         ret = -EBUSY;
1072                         goto out;
1073                 }
1074
1075                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1076                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1077                 if (!tm_doorbell && !tr_doorbell) {
1078                         timeout = false;
1079                         break;
1080                 } else if (do_last_check) {
1081                         break;
1082                 }
1083
1084                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1085                 schedule();
1086                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1087                     wait_timeout_us) {
1088                         timeout = true;
1089                         /*
1090                          * We might have scheduled out for long time so make
1091                          * sure to check if doorbells are cleared by this time
1092                          * or not.
1093                          */
1094                         do_last_check = true;
1095                 }
1096                 spin_lock_irqsave(hba->host->host_lock, flags);
1097         } while (tm_doorbell || tr_doorbell);
1098
1099         if (timeout) {
1100                 dev_err(hba->dev,
1101                         "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1102                         __func__, tm_doorbell, tr_doorbell);
1103                 ret = -EBUSY;
1104         }
1105 out:
1106         spin_unlock_irqrestore(hba->host->host_lock, flags);
1107         ufshcd_release(hba);
1108         return ret;
1109 }
1110
1111 /**
1112  * ufshcd_scale_gear - scale up/down UFS gear
1113  * @hba: per adapter instance
1114  * @scale_up: True for scaling up gear and false for scaling down
1115  *
1116  * Returns 0 for success,
1117  * Returns -EBUSY if scaling can't happen at this time
1118  * Returns non-zero for any other errors
1119  */
1120 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1121 {
1122         int ret = 0;
1123         struct ufs_pa_layer_attr new_pwr_info;
1124
1125         if (scale_up) {
1126                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1127                        sizeof(struct ufs_pa_layer_attr));
1128         } else {
1129                 memcpy(&new_pwr_info, &hba->pwr_info,
1130                        sizeof(struct ufs_pa_layer_attr));
1131
1132                 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1133                     hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1134                         /* save the current power mode */
1135                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
1136                                 &hba->pwr_info,
1137                                 sizeof(struct ufs_pa_layer_attr));
1138
1139                         /* scale down gear */
1140                         new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1141                         new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1142                 }
1143         }
1144
1145         /* check if the power mode needs to be changed or not? */
1146         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1147         if (ret)
1148                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1149                         __func__, ret,
1150                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1151                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1152
1153         return ret;
1154 }
1155
1156 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1157 {
1158         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
1159         int ret = 0;
1160         /*
1161          * make sure that there are no outstanding requests when
1162          * clock scaling is in progress
1163          */
1164         ufshcd_scsi_block_requests(hba);
1165         down_write(&hba->clk_scaling_lock);
1166         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1167                 ret = -EBUSY;
1168                 up_write(&hba->clk_scaling_lock);
1169                 ufshcd_scsi_unblock_requests(hba);
1170         }
1171
1172         return ret;
1173 }
1174
1175 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1176 {
1177         up_write(&hba->clk_scaling_lock);
1178         ufshcd_scsi_unblock_requests(hba);
1179 }
1180
1181 /**
1182  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1183  * @hba: per adapter instance
1184  * @scale_up: True for scaling up and false for scalin down
1185  *
1186  * Returns 0 for success,
1187  * Returns -EBUSY if scaling can't happen at this time
1188  * Returns non-zero for any other errors
1189  */
1190 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1191 {
1192         int ret = 0;
1193
1194         /* let's not get into low power until clock scaling is completed */
1195         ufshcd_hold(hba, false);
1196
1197         ret = ufshcd_clock_scaling_prepare(hba);
1198         if (ret)
1199                 goto out;
1200
1201         /* scale down the gear before scaling down clocks */
1202         if (!scale_up) {
1203                 ret = ufshcd_scale_gear(hba, false);
1204                 if (ret)
1205                         goto out_unprepare;
1206         }
1207
1208         ret = ufshcd_scale_clks(hba, scale_up);
1209         if (ret) {
1210                 if (!scale_up)
1211                         ufshcd_scale_gear(hba, true);
1212                 goto out_unprepare;
1213         }
1214
1215         /* scale up the gear after scaling up clocks */
1216         if (scale_up) {
1217                 ret = ufshcd_scale_gear(hba, true);
1218                 if (ret) {
1219                         ufshcd_scale_clks(hba, false);
1220                         goto out_unprepare;
1221                 }
1222         }
1223
1224         /* Enable Write Booster if we have scaled up else disable it */
1225         up_write(&hba->clk_scaling_lock);
1226         ufshcd_wb_ctrl(hba, scale_up);
1227         down_write(&hba->clk_scaling_lock);
1228
1229 out_unprepare:
1230         ufshcd_clock_scaling_unprepare(hba);
1231 out:
1232         ufshcd_release(hba);
1233         return ret;
1234 }
1235
1236 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1237 {
1238         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1239                                            clk_scaling.suspend_work);
1240         unsigned long irq_flags;
1241
1242         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1243         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1244                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1245                 return;
1246         }
1247         hba->clk_scaling.is_suspended = true;
1248         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1249
1250         __ufshcd_suspend_clkscaling(hba);
1251 }
1252
1253 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1254 {
1255         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1256                                            clk_scaling.resume_work);
1257         unsigned long irq_flags;
1258
1259         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1260         if (!hba->clk_scaling.is_suspended) {
1261                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1262                 return;
1263         }
1264         hba->clk_scaling.is_suspended = false;
1265         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1266
1267         devfreq_resume_device(hba->devfreq);
1268 }
1269
1270 static int ufshcd_devfreq_target(struct device *dev,
1271                                 unsigned long *freq, u32 flags)
1272 {
1273         int ret = 0;
1274         struct ufs_hba *hba = dev_get_drvdata(dev);
1275         ktime_t start;
1276         bool scale_up, sched_clk_scaling_suspend_work = false;
1277         struct list_head *clk_list = &hba->clk_list_head;
1278         struct ufs_clk_info *clki;
1279         unsigned long irq_flags;
1280
1281         if (!ufshcd_is_clkscaling_supported(hba))
1282                 return -EINVAL;
1283
1284         clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1285         /* Override with the closest supported frequency */
1286         *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1287         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1288         if (ufshcd_eh_in_progress(hba)) {
1289                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1290                 return 0;
1291         }
1292
1293         if (!hba->clk_scaling.active_reqs)
1294                 sched_clk_scaling_suspend_work = true;
1295
1296         if (list_empty(clk_list)) {
1297                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1298                 goto out;
1299         }
1300
1301         /* Decide based on the rounded-off frequency and update */
1302         scale_up = (*freq == clki->max_freq) ? true : false;
1303         if (!scale_up)
1304                 *freq = clki->min_freq;
1305         /* Update the frequency */
1306         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1307                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1308                 ret = 0;
1309                 goto out; /* no state change required */
1310         }
1311         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1312
1313         start = ktime_get();
1314         ret = ufshcd_devfreq_scale(hba, scale_up);
1315
1316         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1317                 (scale_up ? "up" : "down"),
1318                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1319
1320 out:
1321         if (sched_clk_scaling_suspend_work)
1322                 queue_work(hba->clk_scaling.workq,
1323                            &hba->clk_scaling.suspend_work);
1324
1325         return ret;
1326 }
1327
1328 static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1329 {
1330         int *busy = priv;
1331
1332         WARN_ON_ONCE(reserved);
1333         (*busy)++;
1334         return false;
1335 }
1336
1337 /* Whether or not any tag is in use by a request that is in progress. */
1338 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1339 {
1340         struct request_queue *q = hba->cmd_queue;
1341         int busy = 0;
1342
1343         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1344         return busy;
1345 }
1346
1347 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1348                 struct devfreq_dev_status *stat)
1349 {
1350         struct ufs_hba *hba = dev_get_drvdata(dev);
1351         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1352         unsigned long flags;
1353         struct list_head *clk_list = &hba->clk_list_head;
1354         struct ufs_clk_info *clki;
1355         ktime_t curr_t;
1356
1357         if (!ufshcd_is_clkscaling_supported(hba))
1358                 return -EINVAL;
1359
1360         memset(stat, 0, sizeof(*stat));
1361
1362         spin_lock_irqsave(hba->host->host_lock, flags);
1363         curr_t = ktime_get();
1364         if (!scaling->window_start_t)
1365                 goto start_window;
1366
1367         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1368         /*
1369          * If current frequency is 0, then the ondemand governor considers
1370          * there's no initial frequency set. And it always requests to set
1371          * to max. frequency.
1372          */
1373         stat->current_frequency = clki->curr_freq;
1374         if (scaling->is_busy_started)
1375                 scaling->tot_busy_t += ktime_us_delta(curr_t,
1376                                 scaling->busy_start_t);
1377
1378         stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1379         stat->busy_time = scaling->tot_busy_t;
1380 start_window:
1381         scaling->window_start_t = curr_t;
1382         scaling->tot_busy_t = 0;
1383
1384         if (hba->outstanding_reqs) {
1385                 scaling->busy_start_t = curr_t;
1386                 scaling->is_busy_started = true;
1387         } else {
1388                 scaling->busy_start_t = 0;
1389                 scaling->is_busy_started = false;
1390         }
1391         spin_unlock_irqrestore(hba->host->host_lock, flags);
1392         return 0;
1393 }
1394
1395 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1396 {
1397         struct list_head *clk_list = &hba->clk_list_head;
1398         struct ufs_clk_info *clki;
1399         struct devfreq *devfreq;
1400         int ret;
1401
1402         /* Skip devfreq if we don't have any clocks in the list */
1403         if (list_empty(clk_list))
1404                 return 0;
1405
1406         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1407         dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1408         dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1409
1410         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1411                                          &hba->vps->ondemand_data);
1412         devfreq = devfreq_add_device(hba->dev,
1413                         &hba->vps->devfreq_profile,
1414                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1415                         &hba->vps->ondemand_data);
1416         if (IS_ERR(devfreq)) {
1417                 ret = PTR_ERR(devfreq);
1418                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1419
1420                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1421                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1422                 return ret;
1423         }
1424
1425         hba->devfreq = devfreq;
1426
1427         return 0;
1428 }
1429
1430 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1431 {
1432         struct list_head *clk_list = &hba->clk_list_head;
1433         struct ufs_clk_info *clki;
1434
1435         if (!hba->devfreq)
1436                 return;
1437
1438         devfreq_remove_device(hba->devfreq);
1439         hba->devfreq = NULL;
1440
1441         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1442         dev_pm_opp_remove(hba->dev, clki->min_freq);
1443         dev_pm_opp_remove(hba->dev, clki->max_freq);
1444 }
1445
1446 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1447 {
1448         unsigned long flags;
1449
1450         devfreq_suspend_device(hba->devfreq);
1451         spin_lock_irqsave(hba->host->host_lock, flags);
1452         hba->clk_scaling.window_start_t = 0;
1453         spin_unlock_irqrestore(hba->host->host_lock, flags);
1454 }
1455
1456 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1457 {
1458         unsigned long flags;
1459         bool suspend = false;
1460
1461         if (!ufshcd_is_clkscaling_supported(hba))
1462                 return;
1463
1464         spin_lock_irqsave(hba->host->host_lock, flags);
1465         if (!hba->clk_scaling.is_suspended) {
1466                 suspend = true;
1467                 hba->clk_scaling.is_suspended = true;
1468         }
1469         spin_unlock_irqrestore(hba->host->host_lock, flags);
1470
1471         if (suspend)
1472                 __ufshcd_suspend_clkscaling(hba);
1473 }
1474
1475 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1476 {
1477         unsigned long flags;
1478         bool resume = false;
1479
1480         if (!ufshcd_is_clkscaling_supported(hba))
1481                 return;
1482
1483         spin_lock_irqsave(hba->host->host_lock, flags);
1484         if (hba->clk_scaling.is_suspended) {
1485                 resume = true;
1486                 hba->clk_scaling.is_suspended = false;
1487         }
1488         spin_unlock_irqrestore(hba->host->host_lock, flags);
1489
1490         if (resume)
1491                 devfreq_resume_device(hba->devfreq);
1492 }
1493
1494 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1495                 struct device_attribute *attr, char *buf)
1496 {
1497         struct ufs_hba *hba = dev_get_drvdata(dev);
1498
1499         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1500 }
1501
1502 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1503                 struct device_attribute *attr, const char *buf, size_t count)
1504 {
1505         struct ufs_hba *hba = dev_get_drvdata(dev);
1506         u32 value;
1507         int err;
1508
1509         if (kstrtou32(buf, 0, &value))
1510                 return -EINVAL;
1511
1512         value = !!value;
1513         if (value == hba->clk_scaling.is_allowed)
1514                 goto out;
1515
1516         pm_runtime_get_sync(hba->dev);
1517         ufshcd_hold(hba, false);
1518
1519         cancel_work_sync(&hba->clk_scaling.suspend_work);
1520         cancel_work_sync(&hba->clk_scaling.resume_work);
1521
1522         hba->clk_scaling.is_allowed = value;
1523
1524         if (value) {
1525                 ufshcd_resume_clkscaling(hba);
1526         } else {
1527                 ufshcd_suspend_clkscaling(hba);
1528                 err = ufshcd_devfreq_scale(hba, true);
1529                 if (err)
1530                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1531                                         __func__, err);
1532         }
1533
1534         ufshcd_release(hba);
1535         pm_runtime_put_sync(hba->dev);
1536 out:
1537         return count;
1538 }
1539
1540 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1541 {
1542         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1543         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1544         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1545         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1546         hba->clk_scaling.enable_attr.attr.mode = 0644;
1547         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1548                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1549 }
1550
1551 static void ufshcd_ungate_work(struct work_struct *work)
1552 {
1553         int ret;
1554         unsigned long flags;
1555         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1556                         clk_gating.ungate_work);
1557
1558         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1559
1560         spin_lock_irqsave(hba->host->host_lock, flags);
1561         if (hba->clk_gating.state == CLKS_ON) {
1562                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1563                 goto unblock_reqs;
1564         }
1565
1566         spin_unlock_irqrestore(hba->host->host_lock, flags);
1567         ufshcd_hba_vreg_set_hpm(hba);
1568         ufshcd_setup_clocks(hba, true);
1569
1570         ufshcd_enable_irq(hba);
1571
1572         /* Exit from hibern8 */
1573         if (ufshcd_can_hibern8_during_gating(hba)) {
1574                 /* Prevent gating in this path */
1575                 hba->clk_gating.is_suspended = true;
1576                 if (ufshcd_is_link_hibern8(hba)) {
1577                         ret = ufshcd_uic_hibern8_exit(hba);
1578                         if (ret)
1579                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1580                                         __func__, ret);
1581                         else
1582                                 ufshcd_set_link_active(hba);
1583                 }
1584                 hba->clk_gating.is_suspended = false;
1585         }
1586 unblock_reqs:
1587         ufshcd_scsi_unblock_requests(hba);
1588 }
1589
1590 /**
1591  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1592  * Also, exit from hibern8 mode and set the link as active.
1593  * @hba: per adapter instance
1594  * @async: This indicates whether caller should ungate clocks asynchronously.
1595  */
1596 int ufshcd_hold(struct ufs_hba *hba, bool async)
1597 {
1598         int rc = 0;
1599         bool flush_result;
1600         unsigned long flags;
1601
1602         if (!ufshcd_is_clkgating_allowed(hba))
1603                 goto out;
1604         spin_lock_irqsave(hba->host->host_lock, flags);
1605         hba->clk_gating.active_reqs++;
1606
1607 start:
1608         switch (hba->clk_gating.state) {
1609         case CLKS_ON:
1610                 /*
1611                  * Wait for the ungate work to complete if in progress.
1612                  * Though the clocks may be in ON state, the link could
1613                  * still be in hibner8 state if hibern8 is allowed
1614                  * during clock gating.
1615                  * Make sure we exit hibern8 state also in addition to
1616                  * clocks being ON.
1617                  */
1618                 if (ufshcd_can_hibern8_during_gating(hba) &&
1619                     ufshcd_is_link_hibern8(hba)) {
1620                         if (async) {
1621                                 rc = -EAGAIN;
1622                                 hba->clk_gating.active_reqs--;
1623                                 break;
1624                         }
1625                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1626                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1627                         if (hba->clk_gating.is_suspended && !flush_result)
1628                                 goto out;
1629                         spin_lock_irqsave(hba->host->host_lock, flags);
1630                         goto start;
1631                 }
1632                 break;
1633         case REQ_CLKS_OFF:
1634                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1635                         hba->clk_gating.state = CLKS_ON;
1636                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1637                                                 hba->clk_gating.state);
1638                         break;
1639                 }
1640                 /*
1641                  * If we are here, it means gating work is either done or
1642                  * currently running. Hence, fall through to cancel gating
1643                  * work and to enable clocks.
1644                  */
1645                 fallthrough;
1646         case CLKS_OFF:
1647                 ufshcd_scsi_block_requests(hba);
1648                 hba->clk_gating.state = REQ_CLKS_ON;
1649                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1650                                         hba->clk_gating.state);
1651                 queue_work(hba->clk_gating.clk_gating_workq,
1652                            &hba->clk_gating.ungate_work);
1653                 /*
1654                  * fall through to check if we should wait for this
1655                  * work to be done or not.
1656                  */
1657                 fallthrough;
1658         case REQ_CLKS_ON:
1659                 if (async) {
1660                         rc = -EAGAIN;
1661                         hba->clk_gating.active_reqs--;
1662                         break;
1663                 }
1664
1665                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1666                 flush_work(&hba->clk_gating.ungate_work);
1667                 /* Make sure state is CLKS_ON before returning */
1668                 spin_lock_irqsave(hba->host->host_lock, flags);
1669                 goto start;
1670         default:
1671                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1672                                 __func__, hba->clk_gating.state);
1673                 break;
1674         }
1675         spin_unlock_irqrestore(hba->host->host_lock, flags);
1676 out:
1677         return rc;
1678 }
1679 EXPORT_SYMBOL_GPL(ufshcd_hold);
1680
1681 static void ufshcd_gate_work(struct work_struct *work)
1682 {
1683         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1684                         clk_gating.gate_work.work);
1685         unsigned long flags;
1686         int ret;
1687
1688         spin_lock_irqsave(hba->host->host_lock, flags);
1689         /*
1690          * In case you are here to cancel this work the gating state
1691          * would be marked as REQ_CLKS_ON. In this case save time by
1692          * skipping the gating work and exit after changing the clock
1693          * state to CLKS_ON.
1694          */
1695         if (hba->clk_gating.is_suspended ||
1696                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1697                 hba->clk_gating.state = CLKS_ON;
1698                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1699                                         hba->clk_gating.state);
1700                 goto rel_lock;
1701         }
1702
1703         if (hba->clk_gating.active_reqs
1704                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1705                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1706                 || hba->active_uic_cmd || hba->uic_async_done)
1707                 goto rel_lock;
1708
1709         spin_unlock_irqrestore(hba->host->host_lock, flags);
1710
1711         /* put the link into hibern8 mode before turning off clocks */
1712         if (ufshcd_can_hibern8_during_gating(hba)) {
1713                 ret = ufshcd_uic_hibern8_enter(hba);
1714                 if (ret) {
1715                         hba->clk_gating.state = CLKS_ON;
1716                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1717                                         __func__, ret);
1718                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1719                                                 hba->clk_gating.state);
1720                         goto out;
1721                 }
1722                 ufshcd_set_link_hibern8(hba);
1723         }
1724
1725         ufshcd_disable_irq(hba);
1726
1727         ufshcd_setup_clocks(hba, false);
1728
1729         /* Put the host controller in low power mode if possible */
1730         ufshcd_hba_vreg_set_lpm(hba);
1731         /*
1732          * In case you are here to cancel this work the gating state
1733          * would be marked as REQ_CLKS_ON. In this case keep the state
1734          * as REQ_CLKS_ON which would anyway imply that clocks are off
1735          * and a request to turn them on is pending. By doing this way,
1736          * we keep the state machine in tact and this would ultimately
1737          * prevent from doing cancel work multiple times when there are
1738          * new requests arriving before the current cancel work is done.
1739          */
1740         spin_lock_irqsave(hba->host->host_lock, flags);
1741         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1742                 hba->clk_gating.state = CLKS_OFF;
1743                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1744                                         hba->clk_gating.state);
1745         }
1746 rel_lock:
1747         spin_unlock_irqrestore(hba->host->host_lock, flags);
1748 out:
1749         return;
1750 }
1751
1752 /* host lock must be held before calling this variant */
1753 static void __ufshcd_release(struct ufs_hba *hba)
1754 {
1755         if (!ufshcd_is_clkgating_allowed(hba))
1756                 return;
1757
1758         hba->clk_gating.active_reqs--;
1759
1760         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1761             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1762             hba->outstanding_tasks ||
1763             hba->active_uic_cmd || hba->uic_async_done ||
1764             hba->clk_gating.state == CLKS_OFF)
1765                 return;
1766
1767         hba->clk_gating.state = REQ_CLKS_OFF;
1768         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1769         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1770                            &hba->clk_gating.gate_work,
1771                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1772 }
1773
1774 void ufshcd_release(struct ufs_hba *hba)
1775 {
1776         unsigned long flags;
1777
1778         spin_lock_irqsave(hba->host->host_lock, flags);
1779         __ufshcd_release(hba);
1780         spin_unlock_irqrestore(hba->host->host_lock, flags);
1781 }
1782 EXPORT_SYMBOL_GPL(ufshcd_release);
1783
1784 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1785                 struct device_attribute *attr, char *buf)
1786 {
1787         struct ufs_hba *hba = dev_get_drvdata(dev);
1788
1789         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1790 }
1791
1792 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1793                 struct device_attribute *attr, const char *buf, size_t count)
1794 {
1795         struct ufs_hba *hba = dev_get_drvdata(dev);
1796         unsigned long flags, value;
1797
1798         if (kstrtoul(buf, 0, &value))
1799                 return -EINVAL;
1800
1801         spin_lock_irqsave(hba->host->host_lock, flags);
1802         hba->clk_gating.delay_ms = value;
1803         spin_unlock_irqrestore(hba->host->host_lock, flags);
1804         return count;
1805 }
1806
1807 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1808                 struct device_attribute *attr, char *buf)
1809 {
1810         struct ufs_hba *hba = dev_get_drvdata(dev);
1811
1812         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1813 }
1814
1815 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1816                 struct device_attribute *attr, const char *buf, size_t count)
1817 {
1818         struct ufs_hba *hba = dev_get_drvdata(dev);
1819         unsigned long flags;
1820         u32 value;
1821
1822         if (kstrtou32(buf, 0, &value))
1823                 return -EINVAL;
1824
1825         value = !!value;
1826
1827         spin_lock_irqsave(hba->host->host_lock, flags);
1828         if (value == hba->clk_gating.is_enabled)
1829                 goto out;
1830
1831         if (value)
1832                 __ufshcd_release(hba);
1833         else
1834                 hba->clk_gating.active_reqs++;
1835
1836         hba->clk_gating.is_enabled = value;
1837 out:
1838         spin_unlock_irqrestore(hba->host->host_lock, flags);
1839         return count;
1840 }
1841
1842 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1843 {
1844         char wq_name[sizeof("ufs_clkscaling_00")];
1845
1846         if (!ufshcd_is_clkscaling_supported(hba))
1847                 return;
1848
1849         if (!hba->clk_scaling.min_gear)
1850                 hba->clk_scaling.min_gear = UFS_HS_G1;
1851
1852         INIT_WORK(&hba->clk_scaling.suspend_work,
1853                   ufshcd_clk_scaling_suspend_work);
1854         INIT_WORK(&hba->clk_scaling.resume_work,
1855                   ufshcd_clk_scaling_resume_work);
1856
1857         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1858                  hba->host->host_no);
1859         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1860
1861         ufshcd_clkscaling_init_sysfs(hba);
1862 }
1863
1864 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1865 {
1866         if (!ufshcd_is_clkscaling_supported(hba))
1867                 return;
1868
1869         destroy_workqueue(hba->clk_scaling.workq);
1870         ufshcd_devfreq_remove(hba);
1871 }
1872
1873 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1874 {
1875         char wq_name[sizeof("ufs_clk_gating_00")];
1876
1877         if (!ufshcd_is_clkgating_allowed(hba))
1878                 return;
1879
1880         hba->clk_gating.state = CLKS_ON;
1881
1882         hba->clk_gating.delay_ms = 150;
1883         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1884         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1885
1886         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1887                  hba->host->host_no);
1888         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1889                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
1890
1891         hba->clk_gating.is_enabled = true;
1892
1893         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1894         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1895         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1896         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1897         hba->clk_gating.delay_attr.attr.mode = 0644;
1898         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1899                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1900
1901         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1902         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1903         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1904         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1905         hba->clk_gating.enable_attr.attr.mode = 0644;
1906         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1907                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1908 }
1909
1910 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1911 {
1912         if (!ufshcd_is_clkgating_allowed(hba))
1913                 return;
1914         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1915         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1916         cancel_work_sync(&hba->clk_gating.ungate_work);
1917         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1918         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1919 }
1920
1921 /* Must be called with host lock acquired */
1922 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1923 {
1924         bool queue_resume_work = false;
1925         ktime_t curr_t = ktime_get();
1926
1927         if (!ufshcd_is_clkscaling_supported(hba))
1928                 return;
1929
1930         if (!hba->clk_scaling.active_reqs++)
1931                 queue_resume_work = true;
1932
1933         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1934                 return;
1935
1936         if (queue_resume_work)
1937                 queue_work(hba->clk_scaling.workq,
1938                            &hba->clk_scaling.resume_work);
1939
1940         if (!hba->clk_scaling.window_start_t) {
1941                 hba->clk_scaling.window_start_t = curr_t;
1942                 hba->clk_scaling.tot_busy_t = 0;
1943                 hba->clk_scaling.is_busy_started = false;
1944         }
1945
1946         if (!hba->clk_scaling.is_busy_started) {
1947                 hba->clk_scaling.busy_start_t = curr_t;
1948                 hba->clk_scaling.is_busy_started = true;
1949         }
1950 }
1951
1952 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1953 {
1954         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1955
1956         if (!ufshcd_is_clkscaling_supported(hba))
1957                 return;
1958
1959         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1960                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1961                                         scaling->busy_start_t));
1962                 scaling->busy_start_t = 0;
1963                 scaling->is_busy_started = false;
1964         }
1965 }
1966 /**
1967  * ufshcd_send_command - Send SCSI or device management commands
1968  * @hba: per adapter instance
1969  * @task_tag: Task tag of the command
1970  */
1971 static inline
1972 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1973 {
1974         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
1975
1976         lrbp->issue_time_stamp = ktime_get();
1977         lrbp->compl_time_stamp = ktime_set(0, 0);
1978         ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
1979         ufshcd_add_command_trace(hba, task_tag, "send");
1980         ufshcd_clk_scaling_start_busy(hba);
1981         __set_bit(task_tag, &hba->outstanding_reqs);
1982         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1983         /* Make sure that doorbell is committed immediately */
1984         wmb();
1985 }
1986
1987 /**
1988  * ufshcd_copy_sense_data - Copy sense data in case of check condition
1989  * @lrbp: pointer to local reference block
1990  */
1991 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1992 {
1993         int len;
1994         if (lrbp->sense_buffer &&
1995             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1996                 int len_to_copy;
1997
1998                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1999                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2000
2001                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2002                        len_to_copy);
2003         }
2004 }
2005
2006 /**
2007  * ufshcd_copy_query_response() - Copy the Query Response and the data
2008  * descriptor
2009  * @hba: per adapter instance
2010  * @lrbp: pointer to local reference block
2011  */
2012 static
2013 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2014 {
2015         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2016
2017         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2018
2019         /* Get the descriptor */
2020         if (hba->dev_cmd.query.descriptor &&
2021             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2022                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2023                                 GENERAL_UPIU_REQUEST_SIZE;
2024                 u16 resp_len;
2025                 u16 buf_len;
2026
2027                 /* data segment length */
2028                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2029                                                 MASK_QUERY_DATA_SEG_LEN;
2030                 buf_len = be16_to_cpu(
2031                                 hba->dev_cmd.query.request.upiu_req.length);
2032                 if (likely(buf_len >= resp_len)) {
2033                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2034                 } else {
2035                         dev_warn(hba->dev,
2036                                  "%s: rsp size %d is bigger than buffer size %d",
2037                                  __func__, resp_len, buf_len);
2038                         return -EINVAL;
2039                 }
2040         }
2041
2042         return 0;
2043 }
2044
2045 /**
2046  * ufshcd_hba_capabilities - Read controller capabilities
2047  * @hba: per adapter instance
2048  *
2049  * Return: 0 on success, negative on error.
2050  */
2051 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2052 {
2053         int err;
2054
2055         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2056
2057         /* nutrs and nutmrs are 0 based values */
2058         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2059         hba->nutmrs =
2060         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2061
2062         /* Read crypto capabilities */
2063         err = ufshcd_hba_init_crypto_capabilities(hba);
2064         if (err)
2065                 dev_err(hba->dev, "crypto setup failed\n");
2066
2067         return err;
2068 }
2069
2070 /**
2071  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2072  *                            to accept UIC commands
2073  * @hba: per adapter instance
2074  * Return true on success, else false
2075  */
2076 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2077 {
2078         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2079                 return true;
2080         else
2081                 return false;
2082 }
2083
2084 /**
2085  * ufshcd_get_upmcrs - Get the power mode change request status
2086  * @hba: Pointer to adapter instance
2087  *
2088  * This function gets the UPMCRS field of HCS register
2089  * Returns value of UPMCRS field
2090  */
2091 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2092 {
2093         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2094 }
2095
2096 /**
2097  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2098  * @hba: per adapter instance
2099  * @uic_cmd: UIC command
2100  *
2101  * Mutex must be held.
2102  */
2103 static inline void
2104 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2105 {
2106         WARN_ON(hba->active_uic_cmd);
2107
2108         hba->active_uic_cmd = uic_cmd;
2109
2110         /* Write Args */
2111         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2112         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2113         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2114
2115         ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2116
2117         /* Write UIC Cmd */
2118         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2119                       REG_UIC_COMMAND);
2120 }
2121
2122 /**
2123  * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
2124  * @hba: per adapter instance
2125  * @uic_cmd: UIC command
2126  *
2127  * Must be called with mutex held.
2128  * Returns 0 only if success.
2129  */
2130 static int
2131 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2132 {
2133         int ret;
2134         unsigned long flags;
2135
2136         if (wait_for_completion_timeout(&uic_cmd->done,
2137                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2138                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2139         else
2140                 ret = -ETIMEDOUT;
2141
2142         spin_lock_irqsave(hba->host->host_lock, flags);
2143         hba->active_uic_cmd = NULL;
2144         spin_unlock_irqrestore(hba->host->host_lock, flags);
2145
2146         return ret;
2147 }
2148
2149 /**
2150  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2151  * @hba: per adapter instance
2152  * @uic_cmd: UIC command
2153  * @completion: initialize the completion only if this is set to true
2154  *
2155  * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
2156  * with mutex held and host_lock locked.
2157  * Returns 0 only if success.
2158  */
2159 static int
2160 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2161                       bool completion)
2162 {
2163         if (!ufshcd_ready_for_uic_cmd(hba)) {
2164                 dev_err(hba->dev,
2165                         "Controller not ready to accept UIC commands\n");
2166                 return -EIO;
2167         }
2168
2169         if (completion)
2170                 init_completion(&uic_cmd->done);
2171
2172         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2173
2174         return 0;
2175 }
2176
2177 /**
2178  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2179  * @hba: per adapter instance
2180  * @uic_cmd: UIC command
2181  *
2182  * Returns 0 only if success.
2183  */
2184 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2185 {
2186         int ret;
2187         unsigned long flags;
2188
2189         ufshcd_hold(hba, false);
2190         mutex_lock(&hba->uic_cmd_mutex);
2191         ufshcd_add_delay_before_dme_cmd(hba);
2192
2193         spin_lock_irqsave(hba->host->host_lock, flags);
2194         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2195         spin_unlock_irqrestore(hba->host->host_lock, flags);
2196         if (!ret)
2197                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2198
2199         mutex_unlock(&hba->uic_cmd_mutex);
2200
2201         ufshcd_release(hba);
2202         return ret;
2203 }
2204
2205 /**
2206  * ufshcd_map_sg - Map scatter-gather list to prdt
2207  * @hba: per adapter instance
2208  * @lrbp: pointer to local reference block
2209  *
2210  * Returns 0 in case of success, non-zero value in case of failure
2211  */
2212 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2213 {
2214         struct ufshcd_sg_entry *prd_table;
2215         struct scatterlist *sg;
2216         struct scsi_cmnd *cmd;
2217         int sg_segments;
2218         int i;
2219
2220         cmd = lrbp->cmd;
2221         sg_segments = scsi_dma_map(cmd);
2222         if (sg_segments < 0)
2223                 return sg_segments;
2224
2225         if (sg_segments) {
2226
2227                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2228                         lrbp->utr_descriptor_ptr->prd_table_length =
2229                                 cpu_to_le16((sg_segments *
2230                                         sizeof(struct ufshcd_sg_entry)));
2231                 else
2232                         lrbp->utr_descriptor_ptr->prd_table_length =
2233                                 cpu_to_le16((u16) (sg_segments));
2234
2235                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2236
2237                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2238                         prd_table[i].size  =
2239                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2240                         prd_table[i].base_addr =
2241                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2242                         prd_table[i].upper_addr =
2243                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2244                         prd_table[i].reserved = 0;
2245                 }
2246         } else {
2247                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2248         }
2249
2250         return 0;
2251 }
2252
2253 /**
2254  * ufshcd_enable_intr - enable interrupts
2255  * @hba: per adapter instance
2256  * @intrs: interrupt bits
2257  */
2258 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2259 {
2260         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2261
2262         if (hba->ufs_version == UFSHCI_VERSION_10) {
2263                 u32 rw;
2264                 rw = set & INTERRUPT_MASK_RW_VER_10;
2265                 set = rw | ((set ^ intrs) & intrs);
2266         } else {
2267                 set |= intrs;
2268         }
2269
2270         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2271 }
2272
2273 /**
2274  * ufshcd_disable_intr - disable interrupts
2275  * @hba: per adapter instance
2276  * @intrs: interrupt bits
2277  */
2278 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2279 {
2280         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2281
2282         if (hba->ufs_version == UFSHCI_VERSION_10) {
2283                 u32 rw;
2284                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2285                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2286                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2287
2288         } else {
2289                 set &= ~intrs;
2290         }
2291
2292         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2293 }
2294
2295 /**
2296  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2297  * descriptor according to request
2298  * @lrbp: pointer to local reference block
2299  * @upiu_flags: flags required in the header
2300  * @cmd_dir: requests data direction
2301  */
2302 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2303                         u8 *upiu_flags, enum dma_data_direction cmd_dir)
2304 {
2305         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2306         u32 data_direction;
2307         u32 dword_0;
2308         u32 dword_1 = 0;
2309         u32 dword_3 = 0;
2310
2311         if (cmd_dir == DMA_FROM_DEVICE) {
2312                 data_direction = UTP_DEVICE_TO_HOST;
2313                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2314         } else if (cmd_dir == DMA_TO_DEVICE) {
2315                 data_direction = UTP_HOST_TO_DEVICE;
2316                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2317         } else {
2318                 data_direction = UTP_NO_DATA_TRANSFER;
2319                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2320         }
2321
2322         dword_0 = data_direction | (lrbp->command_type
2323                                 << UPIU_COMMAND_TYPE_OFFSET);
2324         if (lrbp->intr_cmd)
2325                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2326
2327         /* Prepare crypto related dwords */
2328         ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2329
2330         /* Transfer request descriptor header fields */
2331         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2332         req_desc->header.dword_1 = cpu_to_le32(dword_1);
2333         /*
2334          * assigning invalid value for command status. Controller
2335          * updates OCS on command completion, with the command
2336          * status
2337          */
2338         req_desc->header.dword_2 =
2339                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2340         req_desc->header.dword_3 = cpu_to_le32(dword_3);
2341
2342         req_desc->prd_table_length = 0;
2343 }
2344
2345 /**
2346  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2347  * for scsi commands
2348  * @lrbp: local reference block pointer
2349  * @upiu_flags: flags
2350  */
2351 static
2352 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2353 {
2354         struct scsi_cmnd *cmd = lrbp->cmd;
2355         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2356         unsigned short cdb_len;
2357
2358         /* command descriptor fields */
2359         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2360                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2361                                 lrbp->lun, lrbp->task_tag);
2362         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2363                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2364
2365         /* Total EHS length and Data segment length will be zero */
2366         ucd_req_ptr->header.dword_2 = 0;
2367
2368         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2369
2370         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2371         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2372         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2373
2374         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2375 }
2376
2377 /**
2378  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2379  * for query requsts
2380  * @hba: UFS hba
2381  * @lrbp: local reference block pointer
2382  * @upiu_flags: flags
2383  */
2384 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2385                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2386 {
2387         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2388         struct ufs_query *query = &hba->dev_cmd.query;
2389         u16 len = be16_to_cpu(query->request.upiu_req.length);
2390
2391         /* Query request header */
2392         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2393                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2394                         lrbp->lun, lrbp->task_tag);
2395         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2396                         0, query->request.query_func, 0, 0);
2397
2398         /* Data segment length only need for WRITE_DESC */
2399         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2400                 ucd_req_ptr->header.dword_2 =
2401                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2402         else
2403                 ucd_req_ptr->header.dword_2 = 0;
2404
2405         /* Copy the Query Request buffer as is */
2406         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2407                         QUERY_OSF_SIZE);
2408
2409         /* Copy the Descriptor */
2410         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2411                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2412
2413         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2414 }
2415
2416 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2417 {
2418         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2419
2420         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2421
2422         /* command descriptor fields */
2423         ucd_req_ptr->header.dword_0 =
2424                 UPIU_HEADER_DWORD(
2425                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2426         /* clear rest of the fields of basic header */
2427         ucd_req_ptr->header.dword_1 = 0;
2428         ucd_req_ptr->header.dword_2 = 0;
2429
2430         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2431 }
2432
2433 /**
2434  * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
2435  *                           for Device Management Purposes
2436  * @hba: per adapter instance
2437  * @lrbp: pointer to local reference block
2438  */
2439 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2440                                       struct ufshcd_lrb *lrbp)
2441 {
2442         u8 upiu_flags;
2443         int ret = 0;
2444
2445         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2446             (hba->ufs_version == UFSHCI_VERSION_11))
2447                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2448         else
2449                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2450
2451         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2452         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2453                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2454         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2455                 ufshcd_prepare_utp_nop_upiu(lrbp);
2456         else
2457                 ret = -EINVAL;
2458
2459         return ret;
2460 }
2461
2462 /**
2463  * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2464  *                         for SCSI Purposes
2465  * @hba: per adapter instance
2466  * @lrbp: pointer to local reference block
2467  */
2468 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2469 {
2470         u8 upiu_flags;
2471         int ret = 0;
2472
2473         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2474             (hba->ufs_version == UFSHCI_VERSION_11))
2475                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2476         else
2477                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2478
2479         if (likely(lrbp->cmd)) {
2480                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2481                                                 lrbp->cmd->sc_data_direction);
2482                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2483         } else {
2484                 ret = -EINVAL;
2485         }
2486
2487         return ret;
2488 }
2489
2490 /**
2491  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2492  * @upiu_wlun_id: UPIU W-LUN id
2493  *
2494  * Returns SCSI W-LUN id
2495  */
2496 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2497 {
2498         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2499 }
2500
2501 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2502 {
2503         struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2504         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2505         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2506                 i * sizeof(struct utp_transfer_cmd_desc);
2507         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2508                                        response_upiu);
2509         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2510
2511         lrb->utr_descriptor_ptr = utrdlp + i;
2512         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2513                 i * sizeof(struct utp_transfer_req_desc);
2514         lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2515         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2516         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2517         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2518         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2519         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2520 }
2521
2522 /**
2523  * ufshcd_queuecommand - main entry point for SCSI requests
2524  * @host: SCSI host pointer
2525  * @cmd: command from SCSI Midlayer
2526  *
2527  * Returns 0 for success, non-zero in case of failure
2528  */
2529 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2530 {
2531         struct ufshcd_lrb *lrbp;
2532         struct ufs_hba *hba;
2533         unsigned long flags;
2534         int tag;
2535         int err = 0;
2536
2537         hba = shost_priv(host);
2538
2539         tag = cmd->request->tag;
2540         if (!ufshcd_valid_tag(hba, tag)) {
2541                 dev_err(hba->dev,
2542                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2543                         __func__, tag, cmd, cmd->request);
2544                 BUG();
2545         }
2546
2547         if (!down_read_trylock(&hba->clk_scaling_lock))
2548                 return SCSI_MLQUEUE_HOST_BUSY;
2549
2550         hba->req_abort_count = 0;
2551
2552         err = ufshcd_hold(hba, true);
2553         if (err) {
2554                 err = SCSI_MLQUEUE_HOST_BUSY;
2555                 goto out;
2556         }
2557         WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2558                 (hba->clk_gating.state != CLKS_ON));
2559
2560         lrbp = &hba->lrb[tag];
2561         if (unlikely(lrbp->in_use)) {
2562                 if (hba->pm_op_in_progress)
2563                         set_host_byte(cmd, DID_BAD_TARGET);
2564                 else
2565                         err = SCSI_MLQUEUE_HOST_BUSY;
2566                 ufshcd_release(hba);
2567                 goto out;
2568         }
2569
2570         WARN_ON(lrbp->cmd);
2571         lrbp->cmd = cmd;
2572         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2573         lrbp->sense_buffer = cmd->sense_buffer;
2574         lrbp->task_tag = tag;
2575         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2576         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2577
2578         ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2579
2580         lrbp->req_abort_skip = false;
2581
2582         ufshcd_comp_scsi_upiu(hba, lrbp);
2583
2584         err = ufshcd_map_sg(hba, lrbp);
2585         if (err) {
2586                 lrbp->cmd = NULL;
2587                 ufshcd_release(hba);
2588                 goto out;
2589         }
2590         /* Make sure descriptors are ready before ringing the doorbell */
2591         wmb();
2592
2593         spin_lock_irqsave(hba->host->host_lock, flags);
2594         switch (hba->ufshcd_state) {
2595         case UFSHCD_STATE_OPERATIONAL:
2596         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2597                 break;
2598         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2599                 /*
2600                  * pm_runtime_get_sync() is used at error handling preparation
2601                  * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2602                  * PM ops, it can never be finished if we let SCSI layer keep
2603                  * retrying it, which gets err handler stuck forever. Neither
2604                  * can we let the scsi cmd pass through, because UFS is in bad
2605                  * state, the scsi cmd may eventually time out, which will get
2606                  * err handler blocked for too long. So, just fail the scsi cmd
2607                  * sent from PM ops, err handler can recover PM error anyways.
2608                  */
2609                 if (hba->pm_op_in_progress) {
2610                         hba->force_reset = true;
2611                         set_host_byte(cmd, DID_BAD_TARGET);
2612                         goto out_compl_cmd;
2613                 }
2614                 fallthrough;
2615         case UFSHCD_STATE_RESET:
2616                 err = SCSI_MLQUEUE_HOST_BUSY;
2617                 goto out_compl_cmd;
2618         case UFSHCD_STATE_ERROR:
2619                 set_host_byte(cmd, DID_ERROR);
2620                 goto out_compl_cmd;
2621         default:
2622                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2623                                 __func__, hba->ufshcd_state);
2624                 set_host_byte(cmd, DID_BAD_TARGET);
2625                 goto out_compl_cmd;
2626         }
2627         ufshcd_send_command(hba, tag);
2628         spin_unlock_irqrestore(hba->host->host_lock, flags);
2629         goto out;
2630
2631 out_compl_cmd:
2632         scsi_dma_unmap(lrbp->cmd);
2633         lrbp->cmd = NULL;
2634         spin_unlock_irqrestore(hba->host->host_lock, flags);
2635         ufshcd_release(hba);
2636         if (!err)
2637                 cmd->scsi_done(cmd);
2638 out:
2639         up_read(&hba->clk_scaling_lock);
2640         return err;
2641 }
2642
2643 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2644                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2645 {
2646         lrbp->cmd = NULL;
2647         lrbp->sense_bufflen = 0;
2648         lrbp->sense_buffer = NULL;
2649         lrbp->task_tag = tag;
2650         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2651         lrbp->intr_cmd = true; /* No interrupt aggregation */
2652         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2653         hba->dev_cmd.type = cmd_type;
2654
2655         return ufshcd_compose_devman_upiu(hba, lrbp);
2656 }
2657
2658 static int
2659 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2660 {
2661         int err = 0;
2662         unsigned long flags;
2663         u32 mask = 1 << tag;
2664
2665         /* clear outstanding transaction before retry */
2666         spin_lock_irqsave(hba->host->host_lock, flags);
2667         ufshcd_utrl_clear(hba, tag);
2668         spin_unlock_irqrestore(hba->host->host_lock, flags);
2669
2670         /*
2671          * wait for for h/w to clear corresponding bit in door-bell.
2672          * max. wait is 1 sec.
2673          */
2674         err = ufshcd_wait_for_register(hba,
2675                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2676                         mask, ~mask, 1000, 1000);
2677
2678         return err;
2679 }
2680
2681 static int
2682 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2683 {
2684         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2685
2686         /* Get the UPIU response */
2687         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2688                                 UPIU_RSP_CODE_OFFSET;
2689         return query_res->response;
2690 }
2691
2692 /**
2693  * ufshcd_dev_cmd_completion() - handles device management command responses
2694  * @hba: per adapter instance
2695  * @lrbp: pointer to local reference block
2696  */
2697 static int
2698 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2699 {
2700         int resp;
2701         int err = 0;
2702
2703         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2704         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2705
2706         switch (resp) {
2707         case UPIU_TRANSACTION_NOP_IN:
2708                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2709                         err = -EINVAL;
2710                         dev_err(hba->dev, "%s: unexpected response %x\n",
2711                                         __func__, resp);
2712                 }
2713                 break;
2714         case UPIU_TRANSACTION_QUERY_RSP:
2715                 err = ufshcd_check_query_response(hba, lrbp);
2716                 if (!err)
2717                         err = ufshcd_copy_query_response(hba, lrbp);
2718                 break;
2719         case UPIU_TRANSACTION_REJECT_UPIU:
2720                 /* TODO: handle Reject UPIU Response */
2721                 err = -EPERM;
2722                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2723                                 __func__);
2724                 break;
2725         default:
2726                 err = -EINVAL;
2727                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2728                                 __func__, resp);
2729                 break;
2730         }
2731
2732         return err;
2733 }
2734
2735 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2736                 struct ufshcd_lrb *lrbp, int max_timeout)
2737 {
2738         int err = 0;
2739         unsigned long time_left;
2740         unsigned long flags;
2741
2742         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2743                         msecs_to_jiffies(max_timeout));
2744
2745         /* Make sure descriptors are ready before ringing the doorbell */
2746         wmb();
2747         spin_lock_irqsave(hba->host->host_lock, flags);
2748         hba->dev_cmd.complete = NULL;
2749         if (likely(time_left)) {
2750                 err = ufshcd_get_tr_ocs(lrbp);
2751                 if (!err)
2752                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2753         }
2754         spin_unlock_irqrestore(hba->host->host_lock, flags);
2755
2756         if (!time_left) {
2757                 err = -ETIMEDOUT;
2758                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2759                         __func__, lrbp->task_tag);
2760                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2761                         /* successfully cleared the command, retry if needed */
2762                         err = -EAGAIN;
2763                 /*
2764                  * in case of an error, after clearing the doorbell,
2765                  * we also need to clear the outstanding_request
2766                  * field in hba
2767                  */
2768                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2769         }
2770
2771         return err;
2772 }
2773
2774 /**
2775  * ufshcd_exec_dev_cmd - API for sending device management requests
2776  * @hba: UFS hba
2777  * @cmd_type: specifies the type (NOP, Query...)
2778  * @timeout: time in seconds
2779  *
2780  * NOTE: Since there is only one available tag for device management commands,
2781  * it is expected you hold the hba->dev_cmd.lock mutex.
2782  */
2783 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2784                 enum dev_cmd_type cmd_type, int timeout)
2785 {
2786         struct request_queue *q = hba->cmd_queue;
2787         struct request *req;
2788         struct ufshcd_lrb *lrbp;
2789         int err;
2790         int tag;
2791         struct completion wait;
2792         unsigned long flags;
2793
2794         down_read(&hba->clk_scaling_lock);
2795
2796         /*
2797          * Get free slot, sleep if slots are unavailable.
2798          * Even though we use wait_event() which sleeps indefinitely,
2799          * the maximum wait time is bounded by SCSI request timeout.
2800          */
2801         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2802         if (IS_ERR(req)) {
2803                 err = PTR_ERR(req);
2804                 goto out_unlock;
2805         }
2806         tag = req->tag;
2807         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2808
2809         init_completion(&wait);
2810         lrbp = &hba->lrb[tag];
2811         if (unlikely(lrbp->in_use)) {
2812                 err = -EBUSY;
2813                 goto out;
2814         }
2815
2816         WARN_ON(lrbp->cmd);
2817         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2818         if (unlikely(err))
2819                 goto out_put_tag;
2820
2821         hba->dev_cmd.complete = &wait;
2822
2823         ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2824         /* Make sure descriptors are ready before ringing the doorbell */
2825         wmb();
2826         spin_lock_irqsave(hba->host->host_lock, flags);
2827         ufshcd_send_command(hba, tag);
2828         spin_unlock_irqrestore(hba->host->host_lock, flags);
2829
2830         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2831
2832 out:
2833         ufshcd_add_query_upiu_trace(hba, tag,
2834                         err ? "query_complete_err" : "query_complete");
2835
2836 out_put_tag:
2837         blk_put_request(req);
2838 out_unlock:
2839         up_read(&hba->clk_scaling_lock);
2840         return err;
2841 }
2842
2843 /**
2844  * ufshcd_init_query() - init the query response and request parameters
2845  * @hba: per-adapter instance
2846  * @request: address of the request pointer to be initialized
2847  * @response: address of the response pointer to be initialized
2848  * @opcode: operation to perform
2849  * @idn: flag idn to access
2850  * @index: LU number to access
2851  * @selector: query/flag/descriptor further identification
2852  */
2853 static inline void ufshcd_init_query(struct ufs_hba *hba,
2854                 struct ufs_query_req **request, struct ufs_query_res **response,
2855                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2856 {
2857         *request = &hba->dev_cmd.query.request;
2858         *response = &hba->dev_cmd.query.response;
2859         memset(*request, 0, sizeof(struct ufs_query_req));
2860         memset(*response, 0, sizeof(struct ufs_query_res));
2861         (*request)->upiu_req.opcode = opcode;
2862         (*request)->upiu_req.idn = idn;
2863         (*request)->upiu_req.index = index;
2864         (*request)->upiu_req.selector = selector;
2865 }
2866
2867 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2868         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
2869 {
2870         int ret;
2871         int retries;
2872
2873         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2874                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2875                 if (ret)
2876                         dev_dbg(hba->dev,
2877                                 "%s: failed with error %d, retries %d\n",
2878                                 __func__, ret, retries);
2879                 else
2880                         break;
2881         }
2882
2883         if (ret)
2884                 dev_err(hba->dev,
2885                         "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2886                         __func__, opcode, idn, ret, retries);
2887         return ret;
2888 }
2889
2890 /**
2891  * ufshcd_query_flag() - API function for sending flag query requests
2892  * @hba: per-adapter instance
2893  * @opcode: flag query to perform
2894  * @idn: flag idn to access
2895  * @index: flag index to access
2896  * @flag_res: the flag value after the query request completes
2897  *
2898  * Returns 0 for success, non-zero in case of failure
2899  */
2900 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2901                         enum flag_idn idn, u8 index, bool *flag_res)
2902 {
2903         struct ufs_query_req *request = NULL;
2904         struct ufs_query_res *response = NULL;
2905         int err, selector = 0;
2906         int timeout = QUERY_REQ_TIMEOUT;
2907
2908         BUG_ON(!hba);
2909
2910         ufshcd_hold(hba, false);
2911         mutex_lock(&hba->dev_cmd.lock);
2912         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2913                         selector);
2914
2915         switch (opcode) {
2916         case UPIU_QUERY_OPCODE_SET_FLAG:
2917         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2918         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2919                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2920                 break;
2921         case UPIU_QUERY_OPCODE_READ_FLAG:
2922                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2923                 if (!flag_res) {
2924                         /* No dummy reads */
2925                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2926                                         __func__);
2927                         err = -EINVAL;
2928                         goto out_unlock;
2929                 }
2930                 break;
2931         default:
2932                 dev_err(hba->dev,
2933                         "%s: Expected query flag opcode but got = %d\n",
2934                         __func__, opcode);
2935                 err = -EINVAL;
2936                 goto out_unlock;
2937         }
2938
2939         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2940
2941         if (err) {
2942                 dev_err(hba->dev,
2943                         "%s: Sending flag query for idn %d failed, err = %d\n",
2944                         __func__, idn, err);
2945                 goto out_unlock;
2946         }
2947
2948         if (flag_res)
2949                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2950                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2951
2952 out_unlock:
2953         mutex_unlock(&hba->dev_cmd.lock);
2954         ufshcd_release(hba);
2955         return err;
2956 }
2957
2958 /**
2959  * ufshcd_query_attr - API function for sending attribute requests
2960  * @hba: per-adapter instance
2961  * @opcode: attribute opcode
2962  * @idn: attribute idn to access
2963  * @index: index field
2964  * @selector: selector field
2965  * @attr_val: the attribute value after the query request completes
2966  *
2967  * Returns 0 for success, non-zero in case of failure
2968 */
2969 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2970                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2971 {
2972         struct ufs_query_req *request = NULL;
2973         struct ufs_query_res *response = NULL;
2974         int err;
2975
2976         BUG_ON(!hba);
2977
2978         if (!attr_val) {
2979                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2980                                 __func__, opcode);
2981                 return -EINVAL;
2982         }
2983
2984         ufshcd_hold(hba, false);
2985
2986         mutex_lock(&hba->dev_cmd.lock);
2987         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2988                         selector);
2989
2990         switch (opcode) {
2991         case UPIU_QUERY_OPCODE_WRITE_ATTR:
2992                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2993                 request->upiu_req.value = cpu_to_be32(*attr_val);
2994                 break;
2995         case UPIU_QUERY_OPCODE_READ_ATTR:
2996                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2997                 break;
2998         default:
2999                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3000                                 __func__, opcode);
3001                 err = -EINVAL;
3002                 goto out_unlock;
3003         }
3004
3005         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3006
3007         if (err) {
3008                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3009                                 __func__, opcode, idn, index, err);
3010                 goto out_unlock;
3011         }
3012
3013         *attr_val = be32_to_cpu(response->upiu_res.value);
3014
3015 out_unlock:
3016         mutex_unlock(&hba->dev_cmd.lock);
3017         ufshcd_release(hba);
3018         return err;
3019 }
3020
3021 /**
3022  * ufshcd_query_attr_retry() - API function for sending query
3023  * attribute with retries
3024  * @hba: per-adapter instance
3025  * @opcode: attribute opcode
3026  * @idn: attribute idn to access
3027  * @index: index field
3028  * @selector: selector field
3029  * @attr_val: the attribute value after the query request
3030  * completes
3031  *
3032  * Returns 0 for success, non-zero in case of failure
3033 */
3034 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3035         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3036         u32 *attr_val)
3037 {
3038         int ret = 0;
3039         u32 retries;
3040
3041         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3042                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3043                                                 selector, attr_val);
3044                 if (ret)
3045                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3046                                 __func__, ret, retries);
3047                 else
3048                         break;
3049         }
3050
3051         if (ret)
3052                 dev_err(hba->dev,
3053                         "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3054                         __func__, idn, ret, QUERY_REQ_RETRIES);
3055         return ret;
3056 }
3057
3058 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3059                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3060                         u8 selector, u8 *desc_buf, int *buf_len)
3061 {
3062         struct ufs_query_req *request = NULL;
3063         struct ufs_query_res *response = NULL;
3064         int err;
3065
3066         BUG_ON(!hba);
3067
3068         if (!desc_buf) {
3069                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3070                                 __func__, opcode);
3071                 return -EINVAL;
3072         }
3073
3074         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3075                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3076                                 __func__, *buf_len);
3077                 return -EINVAL;
3078         }
3079
3080         ufshcd_hold(hba, false);
3081
3082         mutex_lock(&hba->dev_cmd.lock);
3083         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3084                         selector);
3085         hba->dev_cmd.query.descriptor = desc_buf;
3086         request->upiu_req.length = cpu_to_be16(*buf_len);
3087
3088         switch (opcode) {
3089         case UPIU_QUERY_OPCODE_WRITE_DESC:
3090                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3091                 break;
3092         case UPIU_QUERY_OPCODE_READ_DESC:
3093                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3094                 break;
3095         default:
3096                 dev_err(hba->dev,
3097                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3098                                 __func__, opcode);
3099                 err = -EINVAL;
3100                 goto out_unlock;
3101         }
3102
3103         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3104
3105         if (err) {
3106                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3107                                 __func__, opcode, idn, index, err);
3108                 goto out_unlock;
3109         }
3110
3111         *buf_len = be16_to_cpu(response->upiu_res.length);
3112
3113 out_unlock:
3114         hba->dev_cmd.query.descriptor = NULL;
3115         mutex_unlock(&hba->dev_cmd.lock);
3116         ufshcd_release(hba);
3117         return err;
3118 }
3119
3120 /**
3121  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3122  * @hba: per-adapter instance
3123  * @opcode: attribute opcode
3124  * @idn: attribute idn to access
3125  * @index: index field
3126  * @selector: selector field
3127  * @desc_buf: the buffer that contains the descriptor
3128  * @buf_len: length parameter passed to the device
3129  *
3130  * Returns 0 for success, non-zero in case of failure.
3131  * The buf_len parameter will contain, on return, the length parameter
3132  * received on the response.
3133  */
3134 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3135                                   enum query_opcode opcode,
3136                                   enum desc_idn idn, u8 index,
3137                                   u8 selector,
3138                                   u8 *desc_buf, int *buf_len)
3139 {
3140         int err;
3141         int retries;
3142
3143         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3144                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3145                                                 selector, desc_buf, buf_len);
3146                 if (!err || err == -EINVAL)
3147                         break;
3148         }
3149
3150         return err;
3151 }
3152
3153 /**
3154  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3155  * @hba: Pointer to adapter instance
3156  * @desc_id: descriptor idn value
3157  * @desc_len: mapped desc length (out)
3158  */
3159 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3160                                   int *desc_len)
3161 {
3162         if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3163             desc_id == QUERY_DESC_IDN_RFU_1)
3164                 *desc_len = 0;
3165         else
3166                 *desc_len = hba->desc_size[desc_id];
3167 }
3168 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3169
3170 static void ufshcd_update_desc_length(struct ufs_hba *hba,
3171                                       enum desc_idn desc_id, int desc_index,
3172                                       unsigned char desc_len)
3173 {
3174         if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3175             desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3176                 /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3177                  * than the RPMB unit, however, both descriptors share the same
3178                  * desc_idn, to cover both unit descriptors with one length, we
3179                  * choose the normal unit descriptor length by desc_index.
3180                  */
3181                 hba->desc_size[desc_id] = desc_len;
3182 }
3183
3184 /**
3185  * ufshcd_read_desc_param - read the specified descriptor parameter
3186  * @hba: Pointer to adapter instance
3187  * @desc_id: descriptor idn value
3188  * @desc_index: descriptor index
3189  * @param_offset: offset of the parameter to read
3190  * @param_read_buf: pointer to buffer where parameter would be read
3191  * @param_size: sizeof(param_read_buf)
3192  *
3193  * Return 0 in case of success, non-zero otherwise
3194  */
3195 int ufshcd_read_desc_param(struct ufs_hba *hba,
3196                            enum desc_idn desc_id,
3197                            int desc_index,
3198                            u8 param_offset,
3199                            u8 *param_read_buf,
3200                            u8 param_size)
3201 {
3202         int ret;
3203         u8 *desc_buf;
3204         int buff_len;
3205         bool is_kmalloc = true;
3206
3207         /* Safety check */
3208         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3209                 return -EINVAL;
3210
3211         /* Get the length of descriptor */
3212         ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3213         if (!buff_len) {
3214                 dev_err(hba->dev, "%s: Failed to get desc length", __func__);
3215                 return -EINVAL;
3216         }
3217
3218         /* Check whether we need temp memory */
3219         if (param_offset != 0 || param_size < buff_len) {
3220                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3221                 if (!desc_buf)
3222                         return -ENOMEM;
3223         } else {
3224                 desc_buf = param_read_buf;
3225                 is_kmalloc = false;
3226         }
3227
3228         /* Request for full descriptor */
3229         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3230                                         desc_id, desc_index, 0,
3231                                         desc_buf, &buff_len);
3232
3233         if (ret) {
3234                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3235                         __func__, desc_id, desc_index, param_offset, ret);
3236                 goto out;
3237         }
3238
3239         /* Sanity check */
3240         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3241                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3242                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3243                 ret = -EINVAL;
3244                 goto out;
3245         }
3246
3247         /* Update descriptor length */
3248         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3249         ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3250
3251         /* Check wherher we will not copy more data, than available */
3252         if (is_kmalloc && (param_offset + param_size) > buff_len)
3253                 param_size = buff_len - param_offset;
3254
3255         if (is_kmalloc)
3256                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3257 out:
3258         if (is_kmalloc)
3259                 kfree(desc_buf);
3260         return ret;
3261 }
3262
3263 /**
3264  * struct uc_string_id - unicode string
3265  *
3266  * @len: size of this descriptor inclusive
3267  * @type: descriptor type
3268  * @uc: unicode string character
3269  */
3270 struct uc_string_id {
3271         u8 len;
3272         u8 type;
3273         wchar_t uc[];
3274 } __packed;
3275
3276 /* replace non-printable or non-ASCII characters with spaces */
3277 static inline char ufshcd_remove_non_printable(u8 ch)
3278 {
3279         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3280 }
3281
3282 /**
3283  * ufshcd_read_string_desc - read string descriptor
3284  * @hba: pointer to adapter instance
3285  * @desc_index: descriptor index
3286  * @buf: pointer to buffer where descriptor would be read,
3287  *       the caller should free the memory.
3288  * @ascii: if true convert from unicode to ascii characters
3289  *         null terminated string.
3290  *
3291  * Return:
3292  * *      string size on success.
3293  * *      -ENOMEM: on allocation failure
3294  * *      -EINVAL: on a wrong parameter
3295  */
3296 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3297                             u8 **buf, bool ascii)
3298 {
3299         struct uc_string_id *uc_str;
3300         u8 *str;
3301         int ret;
3302
3303         if (!buf)
3304                 return -EINVAL;
3305
3306         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3307         if (!uc_str)
3308                 return -ENOMEM;
3309
3310         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3311                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3312         if (ret < 0) {
3313                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3314                         QUERY_REQ_RETRIES, ret);
3315                 str = NULL;
3316                 goto out;
3317         }
3318
3319         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3320                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3321                 str = NULL;
3322                 ret = 0;
3323                 goto out;
3324         }
3325
3326         if (ascii) {
3327                 ssize_t ascii_len;
3328                 int i;
3329                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3330                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3331                 str = kzalloc(ascii_len, GFP_KERNEL);
3332                 if (!str) {
3333                         ret = -ENOMEM;
3334                         goto out;
3335                 }
3336
3337                 /*
3338                  * the descriptor contains string in UTF16 format
3339                  * we need to convert to utf-8 so it can be displayed
3340                  */
3341                 ret = utf16s_to_utf8s(uc_str->uc,
3342                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3343                                       UTF16_BIG_ENDIAN, str, ascii_len);
3344
3345                 /* replace non-printable or non-ASCII characters with spaces */
3346                 for (i = 0; i < ret; i++)
3347                         str[i] = ufshcd_remove_non_printable(str[i]);
3348
3349                 str[ret++] = '\0';
3350
3351         } else {
3352                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3353                 if (!str) {
3354                         ret = -ENOMEM;
3355                         goto out;
3356                 }
3357                 ret = uc_str->len;
3358         }
3359 out:
3360         *buf = str;
3361         kfree(uc_str);
3362         return ret;
3363 }
3364
3365 /**
3366  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3367  * @hba: Pointer to adapter instance
3368  * @lun: lun id
3369  * @param_offset: offset of the parameter to read
3370  * @param_read_buf: pointer to buffer where parameter would be read
3371  * @param_size: sizeof(param_read_buf)
3372  *
3373  * Return 0 in case of success, non-zero otherwise
3374  */
3375 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3376                                               int lun,
3377                                               enum unit_desc_param param_offset,
3378                                               u8 *param_read_buf,
3379                                               u32 param_size)
3380 {
3381         /*
3382          * Unit descriptors are only available for general purpose LUs (LUN id
3383          * from 0 to 7) and RPMB Well known LU.
3384          */
3385         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3386                 return -EOPNOTSUPP;
3387
3388         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3389                                       param_offset, param_read_buf, param_size);
3390 }
3391
3392 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3393 {
3394         int err = 0;
3395         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3396
3397         if (hba->dev_info.wspecversion >= 0x300) {
3398                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3399                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3400                                 &gating_wait);
3401                 if (err)
3402                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3403                                          err, gating_wait);
3404
3405                 if (gating_wait == 0) {
3406                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3407                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3408                                          gating_wait);
3409                 }
3410
3411                 hba->dev_info.clk_gating_wait_us = gating_wait;
3412         }
3413
3414         return err;
3415 }
3416
3417 /**
3418  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3419  * @hba: per adapter instance
3420  *
3421  * 1. Allocate DMA memory for Command Descriptor array
3422  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3423  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3424  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3425  *      (UTMRDL)
3426  * 4. Allocate memory for local reference block(lrb).
3427  *
3428  * Returns 0 for success, non-zero in case of failure
3429  */
3430 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3431 {
3432         size_t utmrdl_size, utrdl_size, ucdl_size;
3433
3434         /* Allocate memory for UTP command descriptors */
3435         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3436         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3437                                                   ucdl_size,
3438                                                   &hba->ucdl_dma_addr,
3439                                                   GFP_KERNEL);
3440
3441         /*
3442          * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3443          * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3444          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3445          * be aligned to 128 bytes as well
3446          */
3447         if (!hba->ucdl_base_addr ||
3448             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3449                 dev_err(hba->dev,
3450                         "Command Descriptor Memory allocation failed\n");
3451                 goto out;
3452         }
3453
3454         /*
3455          * Allocate memory for UTP Transfer descriptors
3456          * UFSHCI requires 1024 byte alignment of UTRD
3457          */
3458         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3459         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3460                                                    utrdl_size,
3461                                                    &hba->utrdl_dma_addr,
3462                                                    GFP_KERNEL);
3463         if (!hba->utrdl_base_addr ||
3464             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3465                 dev_err(hba->dev,
3466                         "Transfer Descriptor Memory allocation failed\n");
3467                 goto out;
3468         }
3469
3470         /*
3471          * Allocate memory for UTP Task Management descriptors
3472          * UFSHCI requires 1024 byte alignment of UTMRD
3473          */
3474         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3475         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3476                                                     utmrdl_size,
3477                                                     &hba->utmrdl_dma_addr,
3478                                                     GFP_KERNEL);
3479         if (!hba->utmrdl_base_addr ||
3480             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3481                 dev_err(hba->dev,
3482                 "Task Management Descriptor Memory allocation failed\n");
3483                 goto out;
3484         }
3485
3486         /* Allocate memory for local reference block */
3487         hba->lrb = devm_kcalloc(hba->dev,
3488                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3489                                 GFP_KERNEL);
3490         if (!hba->lrb) {
3491                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3492                 goto out;
3493         }
3494         return 0;
3495 out:
3496         return -ENOMEM;
3497 }
3498
3499 /**
3500  * ufshcd_host_memory_configure - configure local reference block with
3501  *                              memory offsets
3502  * @hba: per adapter instance
3503  *
3504  * Configure Host memory space
3505  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3506  * address.
3507  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3508  * and PRDT offset.
3509  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3510  * into local reference block.
3511  */
3512 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3513 {
3514         struct utp_transfer_req_desc *utrdlp;
3515         dma_addr_t cmd_desc_dma_addr;
3516         dma_addr_t cmd_desc_element_addr;
3517         u16 response_offset;
3518         u16 prdt_offset;
3519         int cmd_desc_size;
3520         int i;
3521
3522         utrdlp = hba->utrdl_base_addr;
3523
3524         response_offset =
3525                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3526         prdt_offset =
3527                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3528
3529         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3530         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3531
3532         for (i = 0; i < hba->nutrs; i++) {
3533                 /* Configure UTRD with command descriptor base address */
3534                 cmd_desc_element_addr =
3535                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3536                 utrdlp[i].command_desc_base_addr_lo =
3537                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3538                 utrdlp[i].command_desc_base_addr_hi =
3539                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3540
3541                 /* Response upiu and prdt offset should be in double words */
3542                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3543                         utrdlp[i].response_upiu_offset =
3544                                 cpu_to_le16(response_offset);
3545                         utrdlp[i].prd_table_offset =
3546                                 cpu_to_le16(prdt_offset);
3547                         utrdlp[i].response_upiu_length =
3548                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3549                 } else {
3550                         utrdlp[i].response_upiu_offset =
3551                                 cpu_to_le16(response_offset >> 2);
3552                         utrdlp[i].prd_table_offset =
3553                                 cpu_to_le16(prdt_offset >> 2);
3554                         utrdlp[i].response_upiu_length =
3555                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3556                 }
3557
3558                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3559         }
3560 }
3561
3562 /**
3563  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3564  * @hba: per adapter instance
3565  *
3566  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3567  * in order to initialize the Unipro link startup procedure.
3568  * Once the Unipro links are up, the device connected to the controller
3569  * is detected.
3570  *
3571  * Returns 0 on success, non-zero value on failure
3572  */
3573 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3574 {
3575         struct uic_command uic_cmd = {0};
3576         int ret;
3577
3578         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3579
3580         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3581         if (ret)
3582                 dev_dbg(hba->dev,
3583                         "dme-link-startup: error code %d\n", ret);
3584         return ret;
3585 }
3586 /**
3587  * ufshcd_dme_reset - UIC command for DME_RESET
3588  * @hba: per adapter instance
3589  *
3590  * DME_RESET command is issued in order to reset UniPro stack.
3591  * This function now deals with cold reset.
3592  *
3593  * Returns 0 on success, non-zero value on failure
3594  */
3595 static int ufshcd_dme_reset(struct ufs_hba *hba)
3596 {
3597         struct uic_command uic_cmd = {0};
3598         int ret;
3599
3600         uic_cmd.command = UIC_CMD_DME_RESET;
3601
3602         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3603         if (ret)
3604                 dev_err(hba->dev,
3605                         "dme-reset: error code %d\n", ret);
3606
3607         return ret;
3608 }
3609
3610 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3611                                int agreed_gear,
3612                                int adapt_val)
3613 {
3614         int ret;
3615
3616         if (agreed_gear != UFS_HS_G4)
3617                 adapt_val = PA_NO_ADAPT;
3618
3619         ret = ufshcd_dme_set(hba,
3620                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3621                              adapt_val);
3622         return ret;
3623 }
3624 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3625
3626 /**
3627  * ufshcd_dme_enable - UIC command for DME_ENABLE
3628  * @hba: per adapter instance
3629  *
3630  * DME_ENABLE command is issued in order to enable UniPro stack.
3631  *
3632  * Returns 0 on success, non-zero value on failure
3633  */
3634 static int ufshcd_dme_enable(struct ufs_hba *hba)
3635 {
3636         struct uic_command uic_cmd = {0};
3637         int ret;
3638
3639         uic_cmd.command = UIC_CMD_DME_ENABLE;
3640
3641         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3642         if (ret)
3643                 dev_err(hba->dev,
3644                         "dme-reset: error code %d\n", ret);
3645
3646         return ret;
3647 }
3648
3649 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3650 {
3651         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3652         unsigned long min_sleep_time_us;
3653
3654         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3655                 return;
3656
3657         /*
3658          * last_dme_cmd_tstamp will be 0 only for 1st call to
3659          * this function
3660          */
3661         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3662                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3663         } else {
3664                 unsigned long delta =
3665                         (unsigned long) ktime_to_us(
3666                                 ktime_sub(ktime_get(),
3667                                 hba->last_dme_cmd_tstamp));
3668
3669                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3670                         min_sleep_time_us =
3671                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3672                 else
3673                         return; /* no more delay required */
3674         }
3675
3676         /* allow sleep for extra 50us if needed */
3677         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3678 }
3679
3680 /**
3681  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3682  * @hba: per adapter instance
3683  * @attr_sel: uic command argument1
3684  * @attr_set: attribute set type as uic command argument2
3685  * @mib_val: setting value as uic command argument3
3686  * @peer: indicate whether peer or local
3687  *
3688  * Returns 0 on success, non-zero value on failure
3689  */
3690 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3691                         u8 attr_set, u32 mib_val, u8 peer)
3692 {
3693         struct uic_command uic_cmd = {0};
3694         static const char *const action[] = {
3695                 "dme-set",
3696                 "dme-peer-set"
3697         };
3698         const char *set = action[!!peer];
3699         int ret;
3700         int retries = UFS_UIC_COMMAND_RETRIES;
3701
3702         uic_cmd.command = peer ?
3703                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3704         uic_cmd.argument1 = attr_sel;
3705         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3706         uic_cmd.argument3 = mib_val;
3707
3708         do {
3709                 /* for peer attributes we retry upon failure */
3710                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3711                 if (ret)
3712                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3713                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3714         } while (ret && peer && --retries);
3715
3716         if (ret)
3717                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3718                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3719                         UFS_UIC_COMMAND_RETRIES - retries);
3720
3721         return ret;
3722 }
3723 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3724
3725 /**
3726  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3727  * @hba: per adapter instance
3728  * @attr_sel: uic command argument1
3729  * @mib_val: the value of the attribute as returned by the UIC command
3730  * @peer: indicate whether peer or local
3731  *
3732  * Returns 0 on success, non-zero value on failure
3733  */
3734 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3735                         u32 *mib_val, u8 peer)
3736 {
3737         struct uic_command uic_cmd = {0};
3738         static const char *const action[] = {
3739                 "dme-get",
3740                 "dme-peer-get"
3741         };
3742         const char *get = action[!!peer];
3743         int ret;
3744         int retries = UFS_UIC_COMMAND_RETRIES;
3745         struct ufs_pa_layer_attr orig_pwr_info;
3746         struct ufs_pa_layer_attr temp_pwr_info;
3747         bool pwr_mode_change = false;
3748
3749         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3750                 orig_pwr_info = hba->pwr_info;
3751                 temp_pwr_info = orig_pwr_info;
3752
3753                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3754                     orig_pwr_info.pwr_rx == FAST_MODE) {
3755                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3756                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3757                         pwr_mode_change = true;
3758                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3759                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3760                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3761                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3762                         pwr_mode_change = true;
3763                 }
3764                 if (pwr_mode_change) {
3765                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3766                         if (ret)
3767                                 goto out;
3768                 }
3769         }
3770
3771         uic_cmd.command = peer ?
3772                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3773         uic_cmd.argument1 = attr_sel;
3774
3775         do {
3776                 /* for peer attributes we retry upon failure */
3777                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3778                 if (ret)
3779                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3780                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3781         } while (ret && peer && --retries);
3782
3783         if (ret)
3784                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3785                         get, UIC_GET_ATTR_ID(attr_sel),
3786                         UFS_UIC_COMMAND_RETRIES - retries);
3787
3788         if (mib_val && !ret)
3789                 *mib_val = uic_cmd.argument3;
3790
3791         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3792             && pwr_mode_change)
3793                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3794 out:
3795         return ret;
3796 }
3797 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3798
3799 /**
3800  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3801  * state) and waits for it to take effect.
3802  *
3803  * @hba: per adapter instance
3804  * @cmd: UIC command to execute
3805  *
3806  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3807  * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3808  * and device UniPro link and hence it's final completion would be indicated by
3809  * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3810  * addition to normal UIC command completion Status (UCCS). This function only
3811  * returns after the relevant status bits indicate the completion.
3812  *
3813  * Returns 0 on success, non-zero value on failure
3814  */
3815 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3816 {
3817         struct completion uic_async_done;
3818         unsigned long flags;
3819         u8 status;
3820         int ret;
3821         bool reenable_intr = false;
3822
3823         mutex_lock(&hba->uic_cmd_mutex);
3824         init_completion(&uic_async_done);
3825         ufshcd_add_delay_before_dme_cmd(hba);
3826
3827         spin_lock_irqsave(hba->host->host_lock, flags);
3828         if (ufshcd_is_link_broken(hba)) {
3829                 ret = -ENOLINK;
3830                 goto out_unlock;
3831         }
3832         hba->uic_async_done = &uic_async_done;
3833         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3834                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3835                 /*
3836                  * Make sure UIC command completion interrupt is disabled before
3837                  * issuing UIC command.
3838                  */
3839                 wmb();
3840                 reenable_intr = true;
3841         }
3842         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3843         spin_unlock_irqrestore(hba->host->host_lock, flags);
3844         if (ret) {
3845                 dev_err(hba->dev,
3846                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3847                         cmd->command, cmd->argument3, ret);
3848                 goto out;
3849         }
3850
3851         if (!wait_for_completion_timeout(hba->uic_async_done,
3852                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3853                 dev_err(hba->dev,
3854                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3855                         cmd->command, cmd->argument3);
3856                 ret = -ETIMEDOUT;
3857                 goto out;
3858         }
3859
3860         status = ufshcd_get_upmcrs(hba);
3861         if (status != PWR_LOCAL) {
3862                 dev_err(hba->dev,
3863                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3864                         cmd->command, status);
3865                 ret = (status != PWR_OK) ? status : -1;
3866         }
3867 out:
3868         if (ret) {
3869                 ufshcd_print_host_state(hba);
3870                 ufshcd_print_pwr_info(hba);
3871                 ufshcd_print_evt_hist(hba);
3872         }
3873
3874         spin_lock_irqsave(hba->host->host_lock, flags);
3875         hba->active_uic_cmd = NULL;
3876         hba->uic_async_done = NULL;
3877         if (reenable_intr)
3878                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3879         if (ret) {
3880                 ufshcd_set_link_broken(hba);
3881                 ufshcd_schedule_eh_work(hba);
3882         }
3883 out_unlock:
3884         spin_unlock_irqrestore(hba->host->host_lock, flags);
3885         mutex_unlock(&hba->uic_cmd_mutex);
3886
3887         return ret;
3888 }
3889
3890 /**
3891  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
3892  *                              using DME_SET primitives.
3893  * @hba: per adapter instance
3894  * @mode: powr mode value
3895  *
3896  * Returns 0 on success, non-zero value on failure
3897  */
3898 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3899 {
3900         struct uic_command uic_cmd = {0};
3901         int ret;
3902
3903         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3904                 ret = ufshcd_dme_set(hba,
3905                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3906                 if (ret) {
3907                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3908                                                 __func__, ret);
3909                         goto out;
3910                 }
3911         }
3912
3913         uic_cmd.command = UIC_CMD_DME_SET;
3914         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3915         uic_cmd.argument3 = mode;
3916         ufshcd_hold(hba, false);
3917         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3918         ufshcd_release(hba);
3919
3920 out:
3921         return ret;
3922 }
3923
3924 int ufshcd_link_recovery(struct ufs_hba *hba)
3925 {
3926         int ret;
3927         unsigned long flags;
3928
3929         spin_lock_irqsave(hba->host->host_lock, flags);
3930         hba->ufshcd_state = UFSHCD_STATE_RESET;
3931         ufshcd_set_eh_in_progress(hba);
3932         spin_unlock_irqrestore(hba->host->host_lock, flags);
3933
3934         /* Reset the attached device */
3935         ufshcd_vops_device_reset(hba);
3936
3937         ret = ufshcd_host_reset_and_restore(hba);
3938
3939         spin_lock_irqsave(hba->host->host_lock, flags);
3940         if (ret)
3941                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3942         ufshcd_clear_eh_in_progress(hba);
3943         spin_unlock_irqrestore(hba->host->host_lock, flags);
3944
3945         if (ret)
3946                 dev_err(hba->dev, "%s: link recovery failed, err %d",
3947                         __func__, ret);
3948
3949         return ret;
3950 }
3951 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
3952
3953 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3954 {
3955         int ret;
3956         struct uic_command uic_cmd = {0};
3957         ktime_t start = ktime_get();
3958
3959         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3960
3961         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3962         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3963         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3964                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3965
3966         if (ret)
3967                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3968                         __func__, ret);
3969         else
3970                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3971                                                                 POST_CHANGE);
3972
3973         return ret;
3974 }
3975
3976 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3977 {
3978         struct uic_command uic_cmd = {0};
3979         int ret;
3980         ktime_t start = ktime_get();
3981
3982         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3983
3984         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3985         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3986         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3987                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3988
3989         if (ret) {
3990                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3991                         __func__, ret);
3992         } else {
3993                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3994                                                                 POST_CHANGE);
3995                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3996                 hba->ufs_stats.hibern8_exit_cnt++;
3997         }
3998
3999         return ret;
4000 }
4001 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4002
4003 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4004 {
4005         unsigned long flags;
4006         bool update = false;
4007
4008         if (!ufshcd_is_auto_hibern8_supported(hba))
4009                 return;
4010
4011         spin_lock_irqsave(hba->host->host_lock, flags);
4012         if (hba->ahit != ahit) {
4013                 hba->ahit = ahit;
4014                 update = true;
4015         }
4016         spin_unlock_irqrestore(hba->host->host_lock, flags);
4017
4018         if (update && !pm_runtime_suspended(hba->dev)) {
4019                 pm_runtime_get_sync(hba->dev);
4020                 ufshcd_hold(hba, false);
4021                 ufshcd_auto_hibern8_enable(hba);
4022                 ufshcd_release(hba);
4023                 pm_runtime_put(hba->dev);
4024         }
4025 }
4026 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4027
4028 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4029 {
4030         unsigned long flags;
4031
4032         if (!ufshcd_is_auto_hibern8_supported(hba))
4033                 return;
4034
4035         spin_lock_irqsave(hba->host->host_lock, flags);
4036         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4037         spin_unlock_irqrestore(hba->host->host_lock, flags);
4038 }
4039
4040  /**
4041  * ufshcd_init_pwr_info - setting the POR (power on reset)
4042  * values in hba power info
4043  * @hba: per-adapter instance
4044  */
4045 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4046 {
4047         hba->pwr_info.gear_rx = UFS_PWM_G1;
4048         hba->pwr_info.gear_tx = UFS_PWM_G1;
4049         hba->pwr_info.lane_rx = 1;
4050         hba->pwr_info.lane_tx = 1;
4051         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4052         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4053         hba->pwr_info.hs_rate = 0;
4054 }
4055
4056 /**
4057  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4058  * @hba: per-adapter instance
4059  */
4060 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4061 {
4062         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4063
4064         if (hba->max_pwr_info.is_valid)
4065                 return 0;
4066
4067         pwr_info->pwr_tx = FAST_MODE;
4068         pwr_info->pwr_rx = FAST_MODE;
4069         pwr_info->hs_rate = PA_HS_MODE_B;
4070
4071         /* Get the connected lane count */
4072         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4073                         &pwr_info->lane_rx);
4074         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4075                         &pwr_info->lane_tx);
4076
4077         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4078                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4079                                 __func__,
4080                                 pwr_info->lane_rx,
4081                                 pwr_info->lane_tx);
4082                 return -EINVAL;
4083         }
4084
4085         /*
4086          * First, get the maximum gears of HS speed.
4087          * If a zero value, it means there is no HSGEAR capability.
4088          * Then, get the maximum gears of PWM speed.
4089          */
4090         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4091         if (!pwr_info->gear_rx) {
4092                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4093                                 &pwr_info->gear_rx);
4094                 if (!pwr_info->gear_rx) {
4095                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4096                                 __func__, pwr_info->gear_rx);
4097                         return -EINVAL;
4098                 }
4099                 pwr_info->pwr_rx = SLOW_MODE;
4100         }
4101
4102         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4103                         &pwr_info->gear_tx);
4104         if (!pwr_info->gear_tx) {
4105                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4106                                 &pwr_info->gear_tx);
4107                 if (!pwr_info->gear_tx) {
4108                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4109                                 __func__, pwr_info->gear_tx);
4110                         return -EINVAL;
4111                 }
4112                 pwr_info->pwr_tx = SLOW_MODE;
4113         }
4114
4115         hba->max_pwr_info.is_valid = true;
4116         return 0;
4117 }
4118
4119 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4120                              struct ufs_pa_layer_attr *pwr_mode)
4121 {
4122         int ret;
4123
4124         /* if already configured to the requested pwr_mode */
4125         if (!hba->force_pmc &&
4126             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4127             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4128             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4129             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4130             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4131             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4132             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4133                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4134                 return 0;
4135         }
4136
4137         /*
4138          * Configure attributes for power mode change with below.
4139          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4140          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4141          * - PA_HSSERIES
4142          */
4143         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4144         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4145                         pwr_mode->lane_rx);
4146         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4147                         pwr_mode->pwr_rx == FAST_MODE)
4148                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4149         else
4150                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4151
4152         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4153         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4154                         pwr_mode->lane_tx);
4155         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4156                         pwr_mode->pwr_tx == FAST_MODE)
4157                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4158         else
4159                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4160
4161         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4162             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4163             pwr_mode->pwr_rx == FAST_MODE ||
4164             pwr_mode->pwr_tx == FAST_MODE)
4165                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4166                                                 pwr_mode->hs_rate);
4167
4168         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4169                         DL_FC0ProtectionTimeOutVal_Default);
4170         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4171                         DL_TC0ReplayTimeOutVal_Default);
4172         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4173                         DL_AFC0ReqTimeOutVal_Default);
4174         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4175                         DL_FC1ProtectionTimeOutVal_Default);
4176         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4177                         DL_TC1ReplayTimeOutVal_Default);
4178         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4179                         DL_AFC1ReqTimeOutVal_Default);
4180
4181         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4182                         DL_FC0ProtectionTimeOutVal_Default);
4183         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4184                         DL_TC0ReplayTimeOutVal_Default);
4185         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4186                         DL_AFC0ReqTimeOutVal_Default);
4187
4188         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4189                         | pwr_mode->pwr_tx);
4190
4191         if (ret) {
4192                 dev_err(hba->dev,
4193                         "%s: power mode change failed %d\n", __func__, ret);
4194         } else {
4195                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4196                                                                 pwr_mode);
4197
4198                 memcpy(&hba->pwr_info, pwr_mode,
4199                         sizeof(struct ufs_pa_layer_attr));
4200         }
4201
4202         return ret;
4203 }
4204
4205 /**
4206  * ufshcd_config_pwr_mode - configure a new power mode
4207  * @hba: per-adapter instance
4208  * @desired_pwr_mode: desired power configuration
4209  */
4210 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4211                 struct ufs_pa_layer_attr *desired_pwr_mode)
4212 {
4213         struct ufs_pa_layer_attr final_params = { 0 };
4214         int ret;
4215
4216         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4217                                         desired_pwr_mode, &final_params);
4218
4219         if (ret)
4220                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4221
4222         ret = ufshcd_change_power_mode(hba, &final_params);
4223
4224         return ret;
4225 }
4226 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4227
4228 /**
4229  * ufshcd_complete_dev_init() - checks device readiness
4230  * @hba: per-adapter instance
4231  *
4232  * Set fDeviceInit flag and poll until device toggles it.
4233  */
4234 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4235 {
4236         int err;
4237         bool flag_res = true;
4238         ktime_t timeout;
4239
4240         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4241                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4242         if (err) {
4243                 dev_err(hba->dev,
4244                         "%s setting fDeviceInit flag failed with error %d\n",
4245                         __func__, err);
4246                 goto out;
4247         }
4248
4249         /* Poll fDeviceInit flag to be cleared */
4250         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4251         do {
4252                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4253                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4254                 if (!flag_res)
4255                         break;
4256                 usleep_range(5000, 10000);
4257         } while (ktime_before(ktime_get(), timeout));
4258
4259         if (err) {
4260                 dev_err(hba->dev,
4261                                 "%s reading fDeviceInit flag failed with error %d\n",
4262                                 __func__, err);
4263         } else if (flag_res) {
4264                 dev_err(hba->dev,
4265                                 "%s fDeviceInit was not cleared by the device\n",
4266                                 __func__);
4267                 err = -EBUSY;
4268         }
4269 out:
4270         return err;
4271 }
4272
4273 /**
4274  * ufshcd_make_hba_operational - Make UFS controller operational
4275  * @hba: per adapter instance
4276  *
4277  * To bring UFS host controller to operational state,
4278  * 1. Enable required interrupts
4279  * 2. Configure interrupt aggregation
4280  * 3. Program UTRL and UTMRL base address
4281  * 4. Configure run-stop-registers
4282  *
4283  * Returns 0 on success, non-zero value on failure
4284  */
4285 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4286 {
4287         int err = 0;
4288         u32 reg;
4289
4290         /* Enable required interrupts */
4291         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4292
4293         /* Configure interrupt aggregation */
4294         if (ufshcd_is_intr_aggr_allowed(hba))
4295                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4296         else
4297                 ufshcd_disable_intr_aggr(hba);
4298
4299         /* Configure UTRL and UTMRL base address registers */
4300         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4301                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4302         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4303                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4304         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4305                         REG_UTP_TASK_REQ_LIST_BASE_L);
4306         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4307                         REG_UTP_TASK_REQ_LIST_BASE_H);
4308
4309         /*
4310          * Make sure base address and interrupt setup are updated before
4311          * enabling the run/stop registers below.
4312          */
4313         wmb();
4314
4315         /*
4316          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4317          */
4318         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4319         if (!(ufshcd_get_lists_status(reg))) {
4320                 ufshcd_enable_run_stop_reg(hba);
4321         } else {
4322                 dev_err(hba->dev,
4323                         "Host controller not ready to process requests");
4324                 err = -EIO;
4325         }
4326
4327         return err;
4328 }
4329 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4330
4331 /**
4332  * ufshcd_hba_stop - Send controller to reset state
4333  * @hba: per adapter instance
4334  */
4335 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4336 {
4337         unsigned long flags;
4338         int err;
4339
4340         /*
4341          * Obtain the host lock to prevent that the controller is disabled
4342          * while the UFS interrupt handler is active on another CPU.
4343          */
4344         spin_lock_irqsave(hba->host->host_lock, flags);
4345         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4346         spin_unlock_irqrestore(hba->host->host_lock, flags);
4347
4348         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4349                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4350                                         10, 1);
4351         if (err)
4352                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4353 }
4354
4355 /**
4356  * ufshcd_hba_execute_hce - initialize the controller
4357  * @hba: per adapter instance
4358  *
4359  * The controller resets itself and controller firmware initialization
4360  * sequence kicks off. When controller is ready it will set
4361  * the Host Controller Enable bit to 1.
4362  *
4363  * Returns 0 on success, non-zero value on failure
4364  */
4365 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4366 {
4367         int retry_outer = 3;
4368         int retry_inner;
4369
4370 start:
4371         if (!ufshcd_is_hba_active(hba))
4372                 /* change controller state to "reset state" */
4373                 ufshcd_hba_stop(hba);
4374
4375         /* UniPro link is disabled at this point */
4376         ufshcd_set_link_off(hba);
4377
4378         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4379
4380         /* start controller initialization sequence */
4381         ufshcd_hba_start(hba);
4382
4383         /*
4384          * To initialize a UFS host controller HCE bit must be set to 1.
4385          * During initialization the HCE bit value changes from 1->0->1.
4386          * When the host controller completes initialization sequence
4387          * it sets the value of HCE bit to 1. The same HCE bit is read back
4388          * to check if the controller has completed initialization sequence.
4389          * So without this delay the value HCE = 1, set in the previous
4390          * instruction might be read back.
4391          * This delay can be changed based on the controller.
4392          */
4393         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4394
4395         /* wait for the host controller to complete initialization */
4396         retry_inner = 50;
4397         while (ufshcd_is_hba_active(hba)) {
4398                 if (retry_inner) {
4399                         retry_inner--;
4400                 } else {
4401                         dev_err(hba->dev,
4402                                 "Controller enable failed\n");
4403                         if (retry_outer) {
4404                                 retry_outer--;
4405                                 goto start;
4406                         }
4407                         return -EIO;
4408                 }
4409                 usleep_range(1000, 1100);
4410         }
4411
4412         /* enable UIC related interrupts */
4413         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4414
4415         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4416
4417         return 0;
4418 }
4419
4420 int ufshcd_hba_enable(struct ufs_hba *hba)
4421 {
4422         int ret;
4423
4424         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4425                 ufshcd_set_link_off(hba);
4426                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4427
4428                 /* enable UIC related interrupts */
4429                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4430                 ret = ufshcd_dme_reset(hba);
4431                 if (!ret) {
4432                         ret = ufshcd_dme_enable(hba);
4433                         if (!ret)
4434                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4435                         if (ret)
4436                                 dev_err(hba->dev,
4437                                         "Host controller enable failed with non-hce\n");
4438                 }
4439         } else {
4440                 ret = ufshcd_hba_execute_hce(hba);
4441         }
4442
4443         return ret;
4444 }
4445 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4446
4447 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4448 {
4449         int tx_lanes = 0, i, err = 0;
4450
4451         if (!peer)
4452                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4453                                &tx_lanes);
4454         else
4455                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4456                                     &tx_lanes);
4457         for (i = 0; i < tx_lanes; i++) {
4458                 if (!peer)
4459                         err = ufshcd_dme_set(hba,
4460                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4461                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4462                                         0);
4463                 else
4464                         err = ufshcd_dme_peer_set(hba,
4465                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4466                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4467                                         0);
4468                 if (err) {
4469                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4470                                 __func__, peer, i, err);
4471                         break;
4472                 }
4473         }
4474
4475         return err;
4476 }
4477
4478 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4479 {
4480         return ufshcd_disable_tx_lcc(hba, true);
4481 }
4482
4483 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4484 {
4485         struct ufs_event_hist *e;
4486
4487         if (id >= UFS_EVT_CNT)
4488                 return;
4489
4490         e = &hba->ufs_stats.event[id];
4491         e->val[e->pos] = val;
4492         e->tstamp[e->pos] = ktime_get();
4493         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4494
4495         ufshcd_vops_event_notify(hba, id, &val);
4496 }
4497 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4498
4499 /**
4500  * ufshcd_link_startup - Initialize unipro link startup
4501  * @hba: per adapter instance
4502  *
4503  * Returns 0 for success, non-zero in case of failure
4504  */
4505 static int ufshcd_link_startup(struct ufs_hba *hba)
4506 {
4507         int ret;
4508         int retries = DME_LINKSTARTUP_RETRIES;
4509         bool link_startup_again = false;
4510
4511         /*
4512          * If UFS device isn't active then we will have to issue link startup
4513          * 2 times to make sure the device state move to active.
4514          */
4515         if (!ufshcd_is_ufs_dev_active(hba))
4516                 link_startup_again = true;
4517
4518 link_startup:
4519         do {
4520                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4521
4522                 ret = ufshcd_dme_link_startup(hba);
4523
4524                 /* check if device is detected by inter-connect layer */
4525                 if (!ret && !ufshcd_is_device_present(hba)) {
4526                         ufshcd_update_evt_hist(hba,
4527                                                UFS_EVT_LINK_STARTUP_FAIL,
4528                                                0);
4529                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4530                         ret = -ENXIO;
4531                         goto out;
4532                 }
4533
4534                 /*
4535                  * DME link lost indication is only received when link is up,
4536                  * but we can't be sure if the link is up until link startup
4537                  * succeeds. So reset the local Uni-Pro and try again.
4538                  */
4539                 if (ret && ufshcd_hba_enable(hba)) {
4540                         ufshcd_update_evt_hist(hba,
4541                                                UFS_EVT_LINK_STARTUP_FAIL,
4542                                                (u32)ret);
4543                         goto out;
4544                 }
4545         } while (ret && retries--);
4546
4547         if (ret) {
4548                 /* failed to get the link up... retire */
4549                 ufshcd_update_evt_hist(hba,
4550                                        UFS_EVT_LINK_STARTUP_FAIL,
4551                                        (u32)ret);
4552                 goto out;
4553         }
4554
4555         if (link_startup_again) {
4556                 link_startup_again = false;
4557                 retries = DME_LINKSTARTUP_RETRIES;
4558                 goto link_startup;
4559         }
4560
4561         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4562         ufshcd_init_pwr_info(hba);
4563         ufshcd_print_pwr_info(hba);
4564
4565         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4566                 ret = ufshcd_disable_device_tx_lcc(hba);
4567                 if (ret)
4568                         goto out;
4569         }
4570
4571         /* Include any host controller configuration via UIC commands */
4572         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4573         if (ret)
4574                 goto out;
4575
4576         /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4577         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4578         ret = ufshcd_make_hba_operational(hba);
4579 out:
4580         if (ret) {
4581                 dev_err(hba->dev, "link startup failed %d\n", ret);
4582                 ufshcd_print_host_state(hba);
4583                 ufshcd_print_pwr_info(hba);
4584                 ufshcd_print_evt_hist(hba);
4585         }
4586         return ret;
4587 }
4588
4589 /**
4590  * ufshcd_verify_dev_init() - Verify device initialization
4591  * @hba: per-adapter instance
4592  *
4593  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4594  * device Transport Protocol (UTP) layer is ready after a reset.
4595  * If the UTP layer at the device side is not initialized, it may
4596  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4597  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4598  */
4599 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4600 {
4601         int err = 0;
4602         int retries;
4603
4604         ufshcd_hold(hba, false);
4605         mutex_lock(&hba->dev_cmd.lock);
4606         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4607                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4608                                                NOP_OUT_TIMEOUT);
4609
4610                 if (!err || err == -ETIMEDOUT)
4611                         break;
4612
4613                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4614         }
4615         mutex_unlock(&hba->dev_cmd.lock);
4616         ufshcd_release(hba);
4617
4618         if (err)
4619                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4620         return err;
4621 }
4622
4623 /**
4624  * ufshcd_set_queue_depth - set lun queue depth
4625  * @sdev: pointer to SCSI device
4626  *
4627  * Read bLUQueueDepth value and activate scsi tagged command
4628  * queueing. For WLUN, queue depth is set to 1. For best-effort
4629  * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4630  * value that host can queue.
4631  */
4632 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4633 {
4634         int ret = 0;
4635         u8 lun_qdepth;
4636         struct ufs_hba *hba;
4637
4638         hba = shost_priv(sdev->host);
4639
4640         lun_qdepth = hba->nutrs;
4641         ret = ufshcd_read_unit_desc_param(hba,
4642                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4643                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4644                                           &lun_qdepth,
4645                                           sizeof(lun_qdepth));
4646
4647         /* Some WLUN doesn't support unit descriptor */
4648         if (ret == -EOPNOTSUPP)
4649                 lun_qdepth = 1;
4650         else if (!lun_qdepth)
4651                 /* eventually, we can figure out the real queue depth */
4652                 lun_qdepth = hba->nutrs;
4653         else
4654                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4655
4656         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4657                         __func__, lun_qdepth);
4658         scsi_change_queue_depth(sdev, lun_qdepth);
4659 }
4660
4661 /*
4662  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4663  * @hba: per-adapter instance
4664  * @lun: UFS device lun id
4665  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4666  *
4667  * Returns 0 in case of success and b_lu_write_protect status would be returned
4668  * @b_lu_write_protect parameter.
4669  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4670  * Returns -EINVAL in case of invalid parameters passed to this function.
4671  */
4672 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4673                             u8 lun,
4674                             u8 *b_lu_write_protect)
4675 {
4676         int ret;
4677
4678         if (!b_lu_write_protect)
4679                 ret = -EINVAL;
4680         /*
4681          * According to UFS device spec, RPMB LU can't be write
4682          * protected so skip reading bLUWriteProtect parameter for
4683          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4684          */
4685         else if (lun >= hba->dev_info.max_lu_supported)
4686                 ret = -ENOTSUPP;
4687         else
4688                 ret = ufshcd_read_unit_desc_param(hba,
4689                                           lun,
4690                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4691                                           b_lu_write_protect,
4692                                           sizeof(*b_lu_write_protect));
4693         return ret;
4694 }
4695
4696 /**
4697  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4698  * status
4699  * @hba: per-adapter instance
4700  * @sdev: pointer to SCSI device
4701  *
4702  */
4703 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4704                                                     struct scsi_device *sdev)
4705 {
4706         if (hba->dev_info.f_power_on_wp_en &&
4707             !hba->dev_info.is_lu_power_on_wp) {
4708                 u8 b_lu_write_protect;
4709
4710                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4711                                       &b_lu_write_protect) &&
4712                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4713                         hba->dev_info.is_lu_power_on_wp = true;
4714         }
4715 }
4716
4717 /**
4718  * ufshcd_slave_alloc - handle initial SCSI device configurations
4719  * @sdev: pointer to SCSI device
4720  *
4721  * Returns success
4722  */
4723 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4724 {
4725         struct ufs_hba *hba;
4726
4727         hba = shost_priv(sdev->host);
4728
4729         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4730         sdev->use_10_for_ms = 1;
4731
4732         /* DBD field should be set to 1 in mode sense(10) */
4733         sdev->set_dbd_for_ms = 1;
4734
4735         /* allow SCSI layer to restart the device in case of errors */
4736         sdev->allow_restart = 1;
4737
4738         /* REPORT SUPPORTED OPERATION CODES is not supported */
4739         sdev->no_report_opcodes = 1;
4740
4741         /* WRITE_SAME command is not supported */
4742         sdev->no_write_same = 1;
4743
4744         ufshcd_set_queue_depth(sdev);
4745
4746         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4747
4748         return 0;
4749 }
4750
4751 /**
4752  * ufshcd_change_queue_depth - change queue depth
4753  * @sdev: pointer to SCSI device
4754  * @depth: required depth to set
4755  *
4756  * Change queue depth and make sure the max. limits are not crossed.
4757  */
4758 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4759 {
4760         struct ufs_hba *hba = shost_priv(sdev->host);
4761
4762         if (depth > hba->nutrs)
4763                 depth = hba->nutrs;
4764         return scsi_change_queue_depth(sdev, depth);
4765 }
4766
4767 /**
4768  * ufshcd_slave_configure - adjust SCSI device configurations
4769  * @sdev: pointer to SCSI device
4770  */
4771 static int ufshcd_slave_configure(struct scsi_device *sdev)
4772 {
4773         struct ufs_hba *hba = shost_priv(sdev->host);
4774         struct request_queue *q = sdev->request_queue;
4775
4776         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4777
4778         if (ufshcd_is_rpm_autosuspend_allowed(hba))
4779                 sdev->rpm_autosuspend = 1;
4780
4781         ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4782
4783         return 0;
4784 }
4785
4786 /**
4787  * ufshcd_slave_destroy - remove SCSI device configurations
4788  * @sdev: pointer to SCSI device
4789  */
4790 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4791 {
4792         struct ufs_hba *hba;
4793
4794         hba = shost_priv(sdev->host);
4795         /* Drop the reference as it won't be needed anymore */
4796         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4797                 unsigned long flags;
4798
4799                 spin_lock_irqsave(hba->host->host_lock, flags);
4800                 hba->sdev_ufs_device = NULL;
4801                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4802         }
4803 }
4804
4805 /**
4806  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4807  * @lrbp: pointer to local reference block of completed command
4808  * @scsi_status: SCSI command status
4809  *
4810  * Returns value base on SCSI command status
4811  */
4812 static inline int
4813 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4814 {
4815         int result = 0;
4816
4817         switch (scsi_status) {
4818         case SAM_STAT_CHECK_CONDITION:
4819                 ufshcd_copy_sense_data(lrbp);
4820                 fallthrough;
4821         case SAM_STAT_GOOD:
4822                 result |= DID_OK << 16 |
4823                           COMMAND_COMPLETE << 8 |
4824                           scsi_status;
4825                 break;
4826         case SAM_STAT_TASK_SET_FULL:
4827         case SAM_STAT_BUSY:
4828         case SAM_STAT_TASK_ABORTED:
4829                 ufshcd_copy_sense_data(lrbp);
4830                 result |= scsi_status;
4831                 break;
4832         default:
4833                 result |= DID_ERROR << 16;
4834                 break;
4835         } /* end of switch */
4836
4837         return result;
4838 }
4839
4840 /**
4841  * ufshcd_transfer_rsp_status - Get overall status of the response
4842  * @hba: per adapter instance
4843  * @lrbp: pointer to local reference block of completed command
4844  *
4845  * Returns result of the command to notify SCSI midlayer
4846  */
4847 static inline int
4848 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4849 {
4850         int result = 0;
4851         int scsi_status;
4852         int ocs;
4853
4854         /* overall command status of utrd */
4855         ocs = ufshcd_get_tr_ocs(lrbp);
4856
4857         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4858                 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
4859                                         MASK_RSP_UPIU_RESULT)
4860                         ocs = OCS_SUCCESS;
4861         }
4862
4863         switch (ocs) {
4864         case OCS_SUCCESS:
4865                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4866                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4867                 switch (result) {
4868                 case UPIU_TRANSACTION_RESPONSE:
4869                         /*
4870                          * get the response UPIU result to extract
4871                          * the SCSI command status
4872                          */
4873                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4874
4875                         /*
4876                          * get the result based on SCSI status response
4877                          * to notify the SCSI midlayer of the command status
4878                          */
4879                         scsi_status = result & MASK_SCSI_STATUS;
4880                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4881
4882                         /*
4883                          * Currently we are only supporting BKOPs exception
4884                          * events hence we can ignore BKOPs exception event
4885                          * during power management callbacks. BKOPs exception
4886                          * event is not expected to be raised in runtime suspend
4887                          * callback as it allows the urgent bkops.
4888                          * During system suspend, we are anyway forcefully
4889                          * disabling the bkops and if urgent bkops is needed
4890                          * it will be enabled on system resume. Long term
4891                          * solution could be to abort the system suspend if
4892                          * UFS device needs urgent BKOPs.
4893                          */
4894                         if (!hba->pm_op_in_progress &&
4895                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4896                             schedule_work(&hba->eeh_work)) {
4897                                 /*
4898                                  * Prevent suspend once eeh_work is scheduled
4899                                  * to avoid deadlock between ufshcd_suspend
4900                                  * and exception event handler.
4901                                  */
4902                                 pm_runtime_get_noresume(hba->dev);
4903                         }
4904                         break;
4905                 case UPIU_TRANSACTION_REJECT_UPIU:
4906                         /* TODO: handle Reject UPIU Response */
4907                         result = DID_ERROR << 16;
4908                         dev_err(hba->dev,
4909                                 "Reject UPIU not fully implemented\n");
4910                         break;
4911                 default:
4912                         dev_err(hba->dev,
4913                                 "Unexpected request response code = %x\n",
4914                                 result);
4915                         result = DID_ERROR << 16;
4916                         break;
4917                 }
4918                 break;
4919         case OCS_ABORTED:
4920                 result |= DID_ABORT << 16;
4921                 break;
4922         case OCS_INVALID_COMMAND_STATUS:
4923                 result |= DID_REQUEUE << 16;
4924                 break;
4925         case OCS_INVALID_CMD_TABLE_ATTR:
4926         case OCS_INVALID_PRDT_ATTR:
4927         case OCS_MISMATCH_DATA_BUF_SIZE:
4928         case OCS_MISMATCH_RESP_UPIU_SIZE:
4929         case OCS_PEER_COMM_FAILURE:
4930         case OCS_FATAL_ERROR:
4931         case OCS_DEVICE_FATAL_ERROR:
4932         case OCS_INVALID_CRYPTO_CONFIG:
4933         case OCS_GENERAL_CRYPTO_ERROR:
4934         default:
4935                 result |= DID_ERROR << 16;
4936                 dev_err(hba->dev,
4937                                 "OCS error from controller = %x for tag %d\n",
4938                                 ocs, lrbp->task_tag);
4939                 ufshcd_print_evt_hist(hba);
4940                 ufshcd_print_host_state(hba);
4941                 break;
4942         } /* end of switch */
4943
4944         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4945                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4946         return result;
4947 }
4948
4949 /**
4950  * ufshcd_uic_cmd_compl - handle completion of uic command
4951  * @hba: per adapter instance
4952  * @intr_status: interrupt status generated by the controller
4953  *
4954  * Returns
4955  *  IRQ_HANDLED - If interrupt is valid
4956  *  IRQ_NONE    - If invalid interrupt
4957  */
4958 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4959 {
4960         irqreturn_t retval = IRQ_NONE;
4961
4962         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4963                 hba->active_uic_cmd->argument2 |=
4964                         ufshcd_get_uic_cmd_result(hba);
4965                 hba->active_uic_cmd->argument3 =
4966                         ufshcd_get_dme_attr_val(hba);
4967                 complete(&hba->active_uic_cmd->done);
4968                 retval = IRQ_HANDLED;
4969         }
4970
4971         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
4972                 complete(hba->uic_async_done);
4973                 retval = IRQ_HANDLED;
4974         }
4975
4976         if (retval == IRQ_HANDLED)
4977                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
4978                                              "complete");
4979         return retval;
4980 }
4981
4982 /**
4983  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4984  * @hba: per adapter instance
4985  * @completed_reqs: requests to complete
4986  */
4987 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4988                                         unsigned long completed_reqs)
4989 {
4990         struct ufshcd_lrb *lrbp;
4991         struct scsi_cmnd *cmd;
4992         int result;
4993         int index;
4994         bool update_scaling = false;
4995
4996         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4997                 lrbp = &hba->lrb[index];
4998                 lrbp->in_use = false;
4999                 lrbp->compl_time_stamp = ktime_get();
5000                 cmd = lrbp->cmd;
5001                 if (cmd) {
5002                         ufshcd_add_command_trace(hba, index, "complete");
5003                         result = ufshcd_transfer_rsp_status(hba, lrbp);
5004                         scsi_dma_unmap(cmd);
5005                         cmd->result = result;
5006                         /* Mark completed command as NULL in LRB */
5007                         lrbp->cmd = NULL;
5008                         /* Do not touch lrbp after scsi done */
5009                         cmd->scsi_done(cmd);
5010                         __ufshcd_release(hba);
5011                         update_scaling = true;
5012                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5013                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5014                         if (hba->dev_cmd.complete) {
5015                                 ufshcd_add_command_trace(hba, index,
5016                                                 "dev_complete");
5017                                 complete(hba->dev_cmd.complete);
5018                                 update_scaling = true;
5019                         }
5020                 }
5021                 if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
5022                         hba->clk_scaling.active_reqs--;
5023         }
5024
5025         /* clear corresponding bits of completed commands */
5026         hba->outstanding_reqs ^= completed_reqs;
5027
5028         ufshcd_clk_scaling_update_busy(hba);
5029 }
5030
5031 /**
5032  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5033  * @hba: per adapter instance
5034  *
5035  * Returns
5036  *  IRQ_HANDLED - If interrupt is valid
5037  *  IRQ_NONE    - If invalid interrupt
5038  */
5039 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5040 {
5041         unsigned long completed_reqs;
5042         u32 tr_doorbell;
5043
5044         /* Resetting interrupt aggregation counters first and reading the
5045          * DOOR_BELL afterward allows us to handle all the completed requests.
5046          * In order to prevent other interrupts starvation the DB is read once
5047          * after reset. The down side of this solution is the possibility of
5048          * false interrupt if device completes another request after resetting
5049          * aggregation and before reading the DB.
5050          */
5051         if (ufshcd_is_intr_aggr_allowed(hba) &&
5052             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5053                 ufshcd_reset_intr_aggr(hba);
5054
5055         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5056         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5057
5058         if (completed_reqs) {
5059                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5060                 return IRQ_HANDLED;
5061         } else {
5062                 return IRQ_NONE;
5063         }
5064 }
5065
5066 /**
5067  * ufshcd_disable_ee - disable exception event
5068  * @hba: per-adapter instance
5069  * @mask: exception event to disable
5070  *
5071  * Disables exception event in the device so that the EVENT_ALERT
5072  * bit is not set.
5073  *
5074  * Returns zero on success, non-zero error value on failure.
5075  */
5076 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5077 {
5078         int err = 0;
5079         u32 val;
5080
5081         if (!(hba->ee_ctrl_mask & mask))
5082                 goto out;
5083
5084         val = hba->ee_ctrl_mask & ~mask;
5085         val &= MASK_EE_STATUS;
5086         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5087                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5088         if (!err)
5089                 hba->ee_ctrl_mask &= ~mask;
5090 out:
5091         return err;
5092 }
5093
5094 /**
5095  * ufshcd_enable_ee - enable exception event
5096  * @hba: per-adapter instance
5097  * @mask: exception event to enable
5098  *
5099  * Enable corresponding exception event in the device to allow
5100  * device to alert host in critical scenarios.
5101  *
5102  * Returns zero on success, non-zero error value on failure.
5103  */
5104 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5105 {
5106         int err = 0;
5107         u32 val;
5108
5109         if (hba->ee_ctrl_mask & mask)
5110                 goto out;
5111
5112         val = hba->ee_ctrl_mask | mask;
5113         val &= MASK_EE_STATUS;
5114         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5115                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5116         if (!err)
5117                 hba->ee_ctrl_mask |= mask;
5118 out:
5119         return err;
5120 }
5121
5122 /**
5123  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5124  * @hba: per-adapter instance
5125  *
5126  * Allow device to manage background operations on its own. Enabling
5127  * this might lead to inconsistent latencies during normal data transfers
5128  * as the device is allowed to manage its own way of handling background
5129  * operations.
5130  *
5131  * Returns zero on success, non-zero on failure.
5132  */
5133 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5134 {
5135         int err = 0;
5136
5137         if (hba->auto_bkops_enabled)
5138                 goto out;
5139
5140         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5141                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5142         if (err) {
5143                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5144                                 __func__, err);
5145                 goto out;
5146         }
5147
5148         hba->auto_bkops_enabled = true;
5149         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5150
5151         /* No need of URGENT_BKOPS exception from the device */
5152         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5153         if (err)
5154                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5155                                 __func__, err);
5156 out:
5157         return err;
5158 }
5159
5160 /**
5161  * ufshcd_disable_auto_bkops - block device in doing background operations
5162  * @hba: per-adapter instance
5163  *
5164  * Disabling background operations improves command response latency but
5165  * has drawback of device moving into critical state where the device is
5166  * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5167  * host is idle so that BKOPS are managed effectively without any negative
5168  * impacts.
5169  *
5170  * Returns zero on success, non-zero on failure.
5171  */
5172 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5173 {
5174         int err = 0;
5175
5176         if (!hba->auto_bkops_enabled)
5177                 goto out;
5178
5179         /*
5180          * If host assisted BKOPs is to be enabled, make sure
5181          * urgent bkops exception is allowed.
5182          */
5183         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5184         if (err) {
5185                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5186                                 __func__, err);
5187                 goto out;
5188         }
5189
5190         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5191                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5192         if (err) {
5193                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5194                                 __func__, err);
5195                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5196                 goto out;
5197         }
5198
5199         hba->auto_bkops_enabled = false;
5200         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5201         hba->is_urgent_bkops_lvl_checked = false;
5202 out:
5203         return err;
5204 }
5205
5206 /**
5207  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5208  * @hba: per adapter instance
5209  *
5210  * After a device reset the device may toggle the BKOPS_EN flag
5211  * to default value. The s/w tracking variables should be updated
5212  * as well. This function would change the auto-bkops state based on
5213  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5214  */
5215 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5216 {
5217         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5218                 hba->auto_bkops_enabled = false;
5219                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5220                 ufshcd_enable_auto_bkops(hba);
5221         } else {
5222                 hba->auto_bkops_enabled = true;
5223                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5224                 ufshcd_disable_auto_bkops(hba);
5225         }
5226         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5227         hba->is_urgent_bkops_lvl_checked = false;
5228 }
5229
5230 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5231 {
5232         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5233                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5234 }
5235
5236 /**
5237  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5238  * @hba: per-adapter instance
5239  * @status: bkops_status value
5240  *
5241  * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5242  * flag in the device to permit background operations if the device
5243  * bkops_status is greater than or equal to "status" argument passed to
5244  * this function, disable otherwise.
5245  *
5246  * Returns 0 for success, non-zero in case of failure.
5247  *
5248  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5249  * to know whether auto bkops is enabled or disabled after this function
5250  * returns control to it.
5251  */
5252 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5253                              enum bkops_status status)
5254 {
5255         int err;
5256         u32 curr_status = 0;
5257
5258         err = ufshcd_get_bkops_status(hba, &curr_status);
5259         if (err) {
5260                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5261                                 __func__, err);
5262                 goto out;
5263         } else if (curr_status > BKOPS_STATUS_MAX) {
5264                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5265                                 __func__, curr_status);
5266                 err = -EINVAL;
5267                 goto out;
5268         }
5269
5270         if (curr_status >= status)
5271                 err = ufshcd_enable_auto_bkops(hba);
5272         else
5273                 err = ufshcd_disable_auto_bkops(hba);
5274 out:
5275         return err;
5276 }
5277
5278 /**
5279  * ufshcd_urgent_bkops - handle urgent bkops exception event
5280  * @hba: per-adapter instance
5281  *
5282  * Enable fBackgroundOpsEn flag in the device to permit background
5283  * operations.
5284  *
5285  * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
5286  * and negative error value for any other failure.
5287  */
5288 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5289 {
5290         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5291 }
5292
5293 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5294 {
5295         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5296                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5297 }
5298
5299 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5300 {
5301         int err;
5302         u32 curr_status = 0;
5303
5304         if (hba->is_urgent_bkops_lvl_checked)
5305                 goto enable_auto_bkops;
5306
5307         err = ufshcd_get_bkops_status(hba, &curr_status);
5308         if (err) {
5309                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5310                                 __func__, err);
5311                 goto out;
5312         }
5313
5314         /*
5315          * We are seeing that some devices are raising the urgent bkops
5316          * exception events even when BKOPS status doesn't indicate performace
5317          * impacted or critical. Handle these device by determining their urgent
5318          * bkops status at runtime.
5319          */
5320         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5321                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5322                                 __func__, curr_status);
5323                 /* update the current status as the urgent bkops level */
5324                 hba->urgent_bkops_lvl = curr_status;
5325                 hba->is_urgent_bkops_lvl_checked = true;
5326         }
5327
5328 enable_auto_bkops:
5329         err = ufshcd_enable_auto_bkops(hba);
5330 out:
5331         if (err < 0)
5332                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5333                                 __func__, err);
5334 }
5335
5336 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5337 {
5338         int ret;
5339         u8 index;
5340         enum query_opcode opcode;
5341
5342         if (!ufshcd_is_wb_allowed(hba))
5343                 return 0;
5344
5345         if (!(enable ^ hba->wb_enabled))
5346                 return 0;
5347         if (enable)
5348                 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5349         else
5350                 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5351
5352         index = ufshcd_wb_get_query_index(hba);
5353         ret = ufshcd_query_flag_retry(hba, opcode,
5354                                       QUERY_FLAG_IDN_WB_EN, index, NULL);
5355         if (ret) {
5356                 dev_err(hba->dev, "%s write booster %s failed %d\n",
5357                         __func__, enable ? "enable" : "disable", ret);
5358                 return ret;
5359         }
5360
5361         hba->wb_enabled = enable;
5362         dev_dbg(hba->dev, "%s write booster %s %d\n",
5363                         __func__, enable ? "enable" : "disable", ret);
5364
5365         return ret;
5366 }
5367
5368 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5369 {
5370         int val;
5371         u8 index;
5372
5373         if (set)
5374                 val =  UPIU_QUERY_OPCODE_SET_FLAG;
5375         else
5376                 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5377
5378         index = ufshcd_wb_get_query_index(hba);
5379         return ufshcd_query_flag_retry(hba, val,
5380                                 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5381                                 index, NULL);
5382 }
5383
5384 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5385 {
5386         if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
5387                 return;
5388
5389         if (enable)
5390                 ufshcd_wb_buf_flush_enable(hba);
5391         else
5392                 ufshcd_wb_buf_flush_disable(hba);
5393
5394 }
5395
5396 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5397 {
5398         int ret;
5399         u8 index;
5400
5401         if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5402                 return 0;
5403
5404         index = ufshcd_wb_get_query_index(hba);
5405         ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5406                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5407                                       index, NULL);
5408         if (ret)
5409                 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5410                         __func__, ret);
5411         else
5412                 hba->wb_buf_flush_enabled = true;
5413
5414         dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5415         return ret;
5416 }
5417
5418 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5419 {
5420         int ret;
5421         u8 index;
5422
5423         if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5424                 return 0;
5425
5426         index = ufshcd_wb_get_query_index(hba);
5427         ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5428                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5429                                       index, NULL);
5430         if (ret) {
5431                 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5432                          __func__, ret);
5433         } else {
5434                 hba->wb_buf_flush_enabled = false;
5435                 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5436         }
5437
5438         return ret;
5439 }
5440
5441 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5442                                                 u32 avail_buf)
5443 {
5444         u32 cur_buf;
5445         int ret;
5446         u8 index;
5447
5448         index = ufshcd_wb_get_query_index(hba);
5449         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5450                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5451                                               index, 0, &cur_buf);
5452         if (ret) {
5453                 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5454                         __func__, ret);
5455                 return false;
5456         }
5457
5458         if (!cur_buf) {
5459                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5460                          cur_buf);
5461                 return false;
5462         }
5463         /* Let it continue to flush when available buffer exceeds threshold */
5464         if (avail_buf < hba->vps->wb_flush_threshold)
5465                 return true;
5466
5467         return false;
5468 }
5469
5470 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5471 {
5472         int ret;
5473         u32 avail_buf;
5474         u8 index;
5475
5476         if (!ufshcd_is_wb_allowed(hba))
5477                 return false;
5478         /*
5479          * The ufs device needs the vcc to be ON to flush.
5480          * With user-space reduction enabled, it's enough to enable flush
5481          * by checking only the available buffer. The threshold
5482          * defined here is > 90% full.
5483          * With user-space preserved enabled, the current-buffer
5484          * should be checked too because the wb buffer size can reduce
5485          * when disk tends to be full. This info is provided by current
5486          * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5487          * keeping vcc on when current buffer is empty.
5488          */
5489         index = ufshcd_wb_get_query_index(hba);
5490         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5491                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5492                                       index, 0, &avail_buf);
5493         if (ret) {
5494                 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5495                          __func__, ret);
5496                 return false;
5497         }
5498
5499         if (!hba->dev_info.b_presrv_uspc_en) {
5500                 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5501                         return true;
5502                 return false;
5503         }
5504
5505         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5506 }
5507
5508 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5509 {
5510         struct ufs_hba *hba = container_of(to_delayed_work(work),
5511                                            struct ufs_hba,
5512                                            rpm_dev_flush_recheck_work);
5513         /*
5514          * To prevent unnecessary VCC power drain after device finishes
5515          * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5516          * after a certain delay to recheck the threshold by next runtime
5517          * suspend.
5518          */
5519         pm_runtime_get_sync(hba->dev);
5520         pm_runtime_put_sync(hba->dev);
5521 }
5522
5523 /**
5524  * ufshcd_exception_event_handler - handle exceptions raised by device
5525  * @work: pointer to work data
5526  *
5527  * Read bExceptionEventStatus attribute from the device and handle the
5528  * exception event accordingly.
5529  */
5530 static void ufshcd_exception_event_handler(struct work_struct *work)
5531 {
5532         struct ufs_hba *hba;
5533         int err;
5534         u32 status = 0;
5535         hba = container_of(work, struct ufs_hba, eeh_work);
5536
5537         pm_runtime_get_sync(hba->dev);
5538         ufshcd_scsi_block_requests(hba);
5539         err = ufshcd_get_ee_status(hba, &status);
5540         if (err) {
5541                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5542                                 __func__, err);
5543                 goto out;
5544         }
5545
5546         status &= hba->ee_ctrl_mask;
5547
5548         if (status & MASK_EE_URGENT_BKOPS)
5549                 ufshcd_bkops_exception_event_handler(hba);
5550
5551 out:
5552         ufshcd_scsi_unblock_requests(hba);
5553         /*
5554          * pm_runtime_get_noresume is called while scheduling
5555          * eeh_work to avoid suspend racing with exception work.
5556          * Hence decrement usage counter using pm_runtime_put_noidle
5557          * to allow suspend on completion of exception event handler.
5558          */
5559         pm_runtime_put_noidle(hba->dev);
5560         pm_runtime_put(hba->dev);
5561         return;
5562 }
5563
5564 /* Complete requests that have door-bell cleared */
5565 static void ufshcd_complete_requests(struct ufs_hba *hba)
5566 {
5567         ufshcd_transfer_req_compl(hba);
5568         ufshcd_tmc_handler(hba);
5569 }
5570
5571 /**
5572  * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5573  *                              to recover from the DL NAC errors or not.
5574  * @hba: per-adapter instance
5575  *
5576  * Returns true if error handling is required, false otherwise
5577  */
5578 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5579 {
5580         unsigned long flags;
5581         bool err_handling = true;
5582
5583         spin_lock_irqsave(hba->host->host_lock, flags);
5584         /*
5585          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
5586          * device fatal error and/or DL NAC & REPLAY timeout errors.
5587          */
5588         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5589                 goto out;
5590
5591         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5592             ((hba->saved_err & UIC_ERROR) &&
5593              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5594                 goto out;
5595
5596         if ((hba->saved_err & UIC_ERROR) &&
5597             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5598                 int err;
5599                 /*
5600                  * wait for 50ms to see if we can get any other errors or not.
5601                  */
5602                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5603                 msleep(50);
5604                 spin_lock_irqsave(hba->host->host_lock, flags);
5605
5606                 /*
5607                  * now check if we have got any other severe errors other than
5608                  * DL NAC error?
5609                  */
5610                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5611                     ((hba->saved_err & UIC_ERROR) &&
5612                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5613                         goto out;
5614
5615                 /*
5616                  * As DL NAC is the only error received so far, send out NOP
5617                  * command to confirm if link is still active or not.
5618                  *   - If we don't get any response then do error recovery.
5619                  *   - If we get response then clear the DL NAC error bit.
5620                  */
5621
5622                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5623                 err = ufshcd_verify_dev_init(hba);
5624                 spin_lock_irqsave(hba->host->host_lock, flags);
5625
5626                 if (err)
5627                         goto out;
5628
5629                 /* Link seems to be alive hence ignore the DL NAC errors */
5630                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5631                         hba->saved_err &= ~UIC_ERROR;
5632                 /* clear NAC error */
5633                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5634                 if (!hba->saved_uic_err)
5635                         err_handling = false;
5636         }
5637 out:
5638         spin_unlock_irqrestore(hba->host->host_lock, flags);
5639         return err_handling;
5640 }
5641
5642 /* host lock must be held before calling this func */
5643 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5644 {
5645         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5646                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5647 }
5648
5649 /* host lock must be held before calling this func */
5650 static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5651 {
5652         /* handle fatal errors only when link is not in error state */
5653         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5654                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5655                     ufshcd_is_saved_err_fatal(hba))
5656                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5657                 else
5658                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5659                 queue_work(hba->eh_wq, &hba->eh_work);
5660         }
5661 }
5662
5663 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5664 {
5665         pm_runtime_get_sync(hba->dev);
5666         if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
5667                 enum ufs_pm_op pm_op;
5668
5669                 /*
5670                  * Don't assume anything of pm_runtime_get_sync(), if
5671                  * resume fails, irq and clocks can be OFF, and powers
5672                  * can be OFF or in LPM.
5673                  */
5674                 ufshcd_setup_hba_vreg(hba, true);
5675                 ufshcd_enable_irq(hba);
5676                 ufshcd_setup_vreg(hba, true);
5677                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5678                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5679                 ufshcd_hold(hba, false);
5680                 if (!ufshcd_is_clkgating_allowed(hba))
5681                         ufshcd_setup_clocks(hba, true);
5682                 ufshcd_release(hba);
5683                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5684                 ufshcd_vops_resume(hba, pm_op);
5685         } else {
5686                 ufshcd_hold(hba, false);
5687                 if (hba->clk_scaling.is_allowed) {
5688                         cancel_work_sync(&hba->clk_scaling.suspend_work);
5689                         cancel_work_sync(&hba->clk_scaling.resume_work);
5690                         ufshcd_suspend_clkscaling(hba);
5691                 }
5692         }
5693 }
5694
5695 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5696 {
5697         ufshcd_release(hba);
5698         if (hba->clk_scaling.is_allowed)
5699                 ufshcd_resume_clkscaling(hba);
5700         pm_runtime_put(hba->dev);
5701 }
5702
5703 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5704 {
5705         return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5706                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5707                         ufshcd_is_link_broken(hba))));
5708 }
5709
5710 #ifdef CONFIG_PM
5711 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5712 {
5713         struct Scsi_Host *shost = hba->host;
5714         struct scsi_device *sdev;
5715         struct request_queue *q;
5716         int ret;
5717
5718         hba->is_sys_suspended = false;
5719         /*
5720          * Set RPM status of hba device to RPM_ACTIVE,
5721          * this also clears its runtime error.
5722          */
5723         ret = pm_runtime_set_active(hba->dev);
5724         /*
5725          * If hba device had runtime error, we also need to resume those
5726          * scsi devices under hba in case any of them has failed to be
5727          * resumed due to hba runtime resume failure. This is to unblock
5728          * blk_queue_enter in case there are bios waiting inside it.
5729          */
5730         if (!ret) {
5731                 shost_for_each_device(sdev, shost) {
5732                         q = sdev->request_queue;
5733                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
5734                                        q->rpm_status == RPM_SUSPENDING))
5735                                 pm_request_resume(q->dev);
5736                 }
5737         }
5738 }
5739 #else
5740 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
5741 {
5742 }
5743 #endif
5744
5745 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
5746 {
5747         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
5748         u32 mode;
5749
5750         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
5751
5752         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
5753                 return true;
5754
5755         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
5756                 return true;
5757
5758         return false;
5759 }
5760
5761 /**
5762  * ufshcd_err_handler - handle UFS errors that require s/w attention
5763  * @work: pointer to work structure
5764  */
5765 static void ufshcd_err_handler(struct work_struct *work)
5766 {
5767         struct ufs_hba *hba;
5768         unsigned long flags;
5769         bool err_xfer = false;
5770         bool err_tm = false;
5771         int err = 0, pmc_err;
5772         int tag;
5773         bool needs_reset = false, needs_restore = false;
5774
5775         hba = container_of(work, struct ufs_hba, eh_work);
5776
5777         down(&hba->eh_sem);
5778         spin_lock_irqsave(hba->host->host_lock, flags);
5779         if (ufshcd_err_handling_should_stop(hba)) {
5780                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5781                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5782                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5783                 up(&hba->eh_sem);
5784                 return;
5785         }
5786         ufshcd_set_eh_in_progress(hba);
5787         spin_unlock_irqrestore(hba->host->host_lock, flags);
5788         ufshcd_err_handling_prepare(hba);
5789         spin_lock_irqsave(hba->host->host_lock, flags);
5790         ufshcd_scsi_block_requests(hba);
5791         hba->ufshcd_state = UFSHCD_STATE_RESET;
5792
5793         /* Complete requests that have door-bell cleared by h/w */
5794         ufshcd_complete_requests(hba);
5795
5796         /*
5797          * A full reset and restore might have happened after preparation
5798          * is finished, double check whether we should stop.
5799          */
5800         if (ufshcd_err_handling_should_stop(hba))
5801                 goto skip_err_handling;
5802
5803         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5804                 bool ret;
5805
5806                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5807                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5808                 ret = ufshcd_quirk_dl_nac_errors(hba);
5809                 spin_lock_irqsave(hba->host->host_lock, flags);
5810                 if (!ret && ufshcd_err_handling_should_stop(hba))
5811                         goto skip_err_handling;
5812         }
5813
5814         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
5815             (hba->saved_uic_err &&
5816              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
5817                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
5818
5819                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5820                 ufshcd_print_host_state(hba);
5821                 ufshcd_print_pwr_info(hba);
5822                 ufshcd_print_evt_hist(hba);
5823                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5824                 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
5825                 spin_lock_irqsave(hba->host->host_lock, flags);
5826         }
5827
5828         /*
5829          * if host reset is required then skip clearing the pending
5830          * transfers forcefully because they will get cleared during
5831          * host reset and restore
5832          */
5833         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5834             ufshcd_is_saved_err_fatal(hba) ||
5835             ((hba->saved_err & UIC_ERROR) &&
5836              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5837                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
5838                 needs_reset = true;
5839                 goto do_reset;
5840         }
5841
5842         /*
5843          * If LINERESET was caught, UFS might have been put to PWM mode,
5844          * check if power mode restore is needed.
5845          */
5846         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
5847                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5848                 if (!hba->saved_uic_err)
5849                         hba->saved_err &= ~UIC_ERROR;
5850                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5851                 if (ufshcd_is_pwr_mode_restore_needed(hba))
5852                         needs_restore = true;
5853                 spin_lock_irqsave(hba->host->host_lock, flags);
5854                 if (!hba->saved_err && !needs_restore)
5855                         goto skip_err_handling;
5856         }
5857
5858         hba->silence_err_logs = true;
5859         /* release lock as clear command might sleep */
5860         spin_unlock_irqrestore(hba->host->host_lock, flags);
5861         /* Clear pending transfer requests */
5862         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5863                 if (ufshcd_try_to_abort_task(hba, tag)) {
5864                         err_xfer = true;
5865                         goto lock_skip_pending_xfer_clear;
5866                 }
5867         }
5868
5869         /* Clear pending task management requests */
5870         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5871                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5872                         err_tm = true;
5873                         goto lock_skip_pending_xfer_clear;
5874                 }
5875         }
5876
5877 lock_skip_pending_xfer_clear:
5878         spin_lock_irqsave(hba->host->host_lock, flags);
5879
5880         /* Complete the requests that are cleared by s/w */
5881         ufshcd_complete_requests(hba);
5882         hba->silence_err_logs = false;
5883
5884         if (err_xfer || err_tm) {
5885                 needs_reset = true;
5886                 goto do_reset;
5887         }
5888
5889         /*
5890          * After all reqs and tasks are cleared from doorbell,
5891          * now it is safe to retore power mode.
5892          */
5893         if (needs_restore) {
5894                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5895                 /*
5896                  * Hold the scaling lock just in case dev cmds
5897                  * are sent via bsg and/or sysfs.
5898                  */
5899                 down_write(&hba->clk_scaling_lock);
5900                 hba->force_pmc = true;
5901                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
5902                 if (pmc_err) {
5903                         needs_reset = true;
5904                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
5905                                         __func__, pmc_err);
5906                 }
5907                 hba->force_pmc = false;
5908                 ufshcd_print_pwr_info(hba);
5909                 up_write(&hba->clk_scaling_lock);
5910                 spin_lock_irqsave(hba->host->host_lock, flags);
5911         }
5912
5913 do_reset:
5914         /* Fatal errors need reset */
5915         if (needs_reset) {
5916                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5917
5918                 /*
5919                  * ufshcd_reset_and_restore() does the link reinitialization
5920                  * which will need atleast one empty doorbell slot to send the
5921                  * device management commands (NOP and query commands).
5922                  * If there is no slot empty at this moment then free up last
5923                  * slot forcefully.
5924                  */
5925                 if (hba->outstanding_reqs == max_doorbells)
5926                         __ufshcd_transfer_req_compl(hba,
5927                                                     (1UL << (hba->nutrs - 1)));
5928
5929                 hba->force_reset = false;
5930                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5931                 err = ufshcd_reset_and_restore(hba);
5932                 if (err)
5933                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
5934                                         __func__, err);
5935                 else
5936                         ufshcd_recover_pm_error(hba);
5937                 spin_lock_irqsave(hba->host->host_lock, flags);
5938         }
5939
5940 skip_err_handling:
5941         if (!needs_reset) {
5942                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5943                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5944                 if (hba->saved_err || hba->saved_uic_err)
5945                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5946                             __func__, hba->saved_err, hba->saved_uic_err);
5947         }
5948         ufshcd_clear_eh_in_progress(hba);
5949         spin_unlock_irqrestore(hba->host->host_lock, flags);
5950         ufshcd_scsi_unblock_requests(hba);
5951         ufshcd_err_handling_unprepare(hba);
5952         up(&hba->eh_sem);
5953 }
5954
5955 /**
5956  * ufshcd_update_uic_error - check and set fatal UIC error flags.
5957  * @hba: per-adapter instance
5958  *
5959  * Returns
5960  *  IRQ_HANDLED - If interrupt is valid
5961  *  IRQ_NONE    - If invalid interrupt
5962  */
5963 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
5964 {
5965         u32 reg;
5966         irqreturn_t retval = IRQ_NONE;
5967
5968         /* PHY layer error */
5969         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5970         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5971             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
5972                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
5973                 /*
5974                  * To know whether this error is fatal or not, DB timeout
5975                  * must be checked but this error is handled separately.
5976                  */
5977                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
5978                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
5979                                         __func__);
5980
5981                 /* Got a LINERESET indication. */
5982                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
5983                         struct uic_command *cmd = NULL;
5984
5985                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
5986                         if (hba->uic_async_done && hba->active_uic_cmd)
5987                                 cmd = hba->active_uic_cmd;
5988                         /*
5989                          * Ignore the LINERESET during power mode change
5990                          * operation via DME_SET command.
5991                          */
5992                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
5993                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5994                 }
5995                 retval |= IRQ_HANDLED;
5996         }
5997
5998         /* PA_INIT_ERROR is fatal and needs UIC reset */
5999         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6000         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6001             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6002                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6003
6004                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6005                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6006                 else if (hba->dev_quirks &
6007                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6008                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6009                                 hba->uic_error |=
6010                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6011                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6012                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6013                 }
6014                 retval |= IRQ_HANDLED;
6015         }
6016
6017         /* UIC NL/TL/DME errors needs software retry */
6018         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6019         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6020             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6021                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6022                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6023                 retval |= IRQ_HANDLED;
6024         }
6025
6026         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6027         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6028             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6029                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6030                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6031                 retval |= IRQ_HANDLED;
6032         }
6033
6034         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6035         if ((reg & UIC_DME_ERROR) &&
6036             (reg & UIC_DME_ERROR_CODE_MASK)) {
6037                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6038                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6039                 retval |= IRQ_HANDLED;
6040         }
6041
6042         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6043                         __func__, hba->uic_error);
6044         return retval;
6045 }
6046
6047 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
6048                                          u32 intr_mask)
6049 {
6050         if (!ufshcd_is_auto_hibern8_supported(hba) ||
6051             !ufshcd_is_auto_hibern8_enabled(hba))
6052                 return false;
6053
6054         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
6055                 return false;
6056
6057         if (hba->active_uic_cmd &&
6058             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
6059             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
6060                 return false;
6061
6062         return true;
6063 }
6064
6065 /**
6066  * ufshcd_check_errors - Check for errors that need s/w attention
6067  * @hba: per-adapter instance
6068  *
6069  * Returns
6070  *  IRQ_HANDLED - If interrupt is valid
6071  *  IRQ_NONE    - If invalid interrupt
6072  */
6073 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6074 {
6075         bool queue_eh_work = false;
6076         irqreturn_t retval = IRQ_NONE;
6077
6078         if (hba->errors & INT_FATAL_ERRORS) {
6079                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6080                                        hba->errors);
6081                 queue_eh_work = true;
6082         }
6083
6084         if (hba->errors & UIC_ERROR) {
6085                 hba->uic_error = 0;
6086                 retval = ufshcd_update_uic_error(hba);
6087                 if (hba->uic_error)
6088                         queue_eh_work = true;
6089         }
6090
6091         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6092                 dev_err(hba->dev,
6093                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6094                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6095                         "Enter" : "Exit",
6096                         hba->errors, ufshcd_get_upmcrs(hba));
6097                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6098                                        hba->errors);
6099                 ufshcd_set_link_broken(hba);
6100                 queue_eh_work = true;
6101         }
6102
6103         if (queue_eh_work) {
6104                 /*
6105                  * update the transfer error masks to sticky bits, let's do this
6106                  * irrespective of current ufshcd_state.
6107                  */
6108                 hba->saved_err |= hba->errors;
6109                 hba->saved_uic_err |= hba->uic_error;
6110
6111                 /* dump controller state before resetting */
6112                 if ((hba->saved_err &
6113                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6114                     (hba->saved_uic_err &&
6115                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6116                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6117                                         __func__, hba->saved_err,
6118                                         hba->saved_uic_err);
6119                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6120                                          "host_regs: ");
6121                         ufshcd_print_pwr_info(hba);
6122                 }
6123                 ufshcd_schedule_eh_work(hba);
6124                 retval |= IRQ_HANDLED;
6125         }
6126         /*
6127          * if (!queue_eh_work) -
6128          * Other errors are either non-fatal where host recovers
6129          * itself without s/w intervention or errors that will be
6130          * handled by the SCSI core layer.
6131          */
6132         return retval;
6133 }
6134
6135 struct ctm_info {
6136         struct ufs_hba  *hba;
6137         unsigned long   pending;
6138         unsigned int    ncpl;
6139 };
6140
6141 static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
6142 {
6143         struct ctm_info *const ci = priv;
6144         struct completion *c;
6145
6146         WARN_ON_ONCE(reserved);
6147         if (test_bit(req->tag, &ci->pending))
6148                 return true;
6149         ci->ncpl++;
6150         c = req->end_io_data;
6151         if (c)
6152                 complete(c);
6153         return true;
6154 }
6155
6156 /**
6157  * ufshcd_tmc_handler - handle task management function completion
6158  * @hba: per adapter instance
6159  *
6160  * Returns
6161  *  IRQ_HANDLED - If interrupt is valid
6162  *  IRQ_NONE    - If invalid interrupt
6163  */
6164 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6165 {
6166         struct request_queue *q = hba->tmf_queue;
6167         struct ctm_info ci = {
6168                 .hba     = hba,
6169                 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
6170         };
6171
6172         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
6173         return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
6174 }
6175
6176 /**
6177  * ufshcd_sl_intr - Interrupt service routine
6178  * @hba: per adapter instance
6179  * @intr_status: contains interrupts generated by the controller
6180  *
6181  * Returns
6182  *  IRQ_HANDLED - If interrupt is valid
6183  *  IRQ_NONE    - If invalid interrupt
6184  */
6185 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6186 {
6187         irqreturn_t retval = IRQ_NONE;
6188
6189         hba->errors = UFSHCD_ERROR_MASK & intr_status;
6190
6191         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
6192                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
6193
6194         if (hba->errors)
6195                 retval |= ufshcd_check_errors(hba);
6196
6197         if (intr_status & UFSHCD_UIC_MASK)
6198                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6199
6200         if (intr_status & UTP_TASK_REQ_COMPL)
6201                 retval |= ufshcd_tmc_handler(hba);
6202
6203         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6204                 retval |= ufshcd_transfer_req_compl(hba);
6205
6206         return retval;
6207 }
6208
6209 /**
6210  * ufshcd_intr - Main interrupt service routine
6211  * @irq: irq number
6212  * @__hba: pointer to adapter instance
6213  *
6214  * Returns
6215  *  IRQ_HANDLED - If interrupt is valid
6216  *  IRQ_NONE    - If invalid interrupt
6217  */
6218 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6219 {
6220         u32 intr_status, enabled_intr_status = 0;
6221         irqreturn_t retval = IRQ_NONE;
6222         struct ufs_hba *hba = __hba;
6223         int retries = hba->nutrs;
6224
6225         spin_lock(hba->host->host_lock);
6226         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6227         hba->ufs_stats.last_intr_status = intr_status;
6228         hba->ufs_stats.last_intr_ts = ktime_get();
6229
6230         /*
6231          * There could be max of hba->nutrs reqs in flight and in worst case
6232          * if the reqs get finished 1 by 1 after the interrupt status is
6233          * read, make sure we handle them by checking the interrupt status
6234          * again in a loop until we process all of the reqs before returning.
6235          */
6236         while (intr_status && retries--) {
6237                 enabled_intr_status =
6238                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6239                 if (intr_status)
6240                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6241                 if (enabled_intr_status)
6242                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6243
6244                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6245         }
6246
6247         if (enabled_intr_status && retval == IRQ_NONE) {
6248                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6249                                         __func__, intr_status);
6250                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6251         }
6252
6253         spin_unlock(hba->host->host_lock);
6254         return retval;
6255 }
6256
6257 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6258 {
6259         int err = 0;
6260         u32 mask = 1 << tag;
6261         unsigned long flags;
6262
6263         if (!test_bit(tag, &hba->outstanding_tasks))
6264                 goto out;
6265
6266         spin_lock_irqsave(hba->host->host_lock, flags);
6267         ufshcd_utmrl_clear(hba, tag);
6268         spin_unlock_irqrestore(hba->host->host_lock, flags);
6269
6270         /* poll for max. 1 sec to clear door bell register by h/w */
6271         err = ufshcd_wait_for_register(hba,
6272                         REG_UTP_TASK_REQ_DOOR_BELL,
6273                         mask, 0, 1000, 1000);
6274 out:
6275         return err;
6276 }
6277
6278 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6279                 struct utp_task_req_desc *treq, u8 tm_function)
6280 {
6281         struct request_queue *q = hba->tmf_queue;
6282         struct Scsi_Host *host = hba->host;
6283         DECLARE_COMPLETION_ONSTACK(wait);
6284         struct request *req;
6285         unsigned long flags;
6286         int free_slot, task_tag, err;
6287
6288         /*
6289          * Get free slot, sleep if slots are unavailable.
6290          * Even though we use wait_event() which sleeps indefinitely,
6291          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6292          */
6293         req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
6294         req->end_io_data = &wait;
6295         free_slot = req->tag;
6296         WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
6297         ufshcd_hold(hba, false);
6298
6299         spin_lock_irqsave(host->host_lock, flags);
6300         task_tag = hba->nutrs + free_slot;
6301
6302         treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6303
6304         memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
6305         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
6306
6307         /* send command to the controller */
6308         __set_bit(free_slot, &hba->outstanding_tasks);
6309
6310         /* Make sure descriptors are ready before ringing the task doorbell */
6311         wmb();
6312
6313         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
6314         /* Make sure that doorbell is committed immediately */
6315         wmb();
6316
6317         spin_unlock_irqrestore(host->host_lock, flags);
6318
6319         ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6320
6321         /* wait until the task management command is completed */
6322         err = wait_for_completion_io_timeout(&wait,
6323                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6324         if (!err) {
6325                 /*
6326                  * Make sure that ufshcd_compl_tm() does not trigger a
6327                  * use-after-free.
6328                  */
6329                 req->end_io_data = NULL;
6330                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6331                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6332                                 __func__, tm_function);
6333                 if (ufshcd_clear_tm_cmd(hba, free_slot))
6334                         dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
6335                                         __func__, free_slot);
6336                 err = -ETIMEDOUT;
6337         } else {
6338                 err = 0;
6339                 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
6340
6341                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6342         }
6343
6344         spin_lock_irqsave(hba->host->host_lock, flags);
6345         __clear_bit(free_slot, &hba->outstanding_tasks);
6346         spin_unlock_irqrestore(hba->host->host_lock, flags);
6347
6348         blk_put_request(req);
6349
6350         ufshcd_release(hba);
6351         return err;
6352 }
6353
6354 /**
6355  * ufshcd_issue_tm_cmd - issues task management commands to controller
6356  * @hba: per adapter instance
6357  * @lun_id: LUN ID to which TM command is sent
6358  * @task_id: task ID to which the TM command is applicable
6359  * @tm_function: task management function opcode
6360  * @tm_response: task management service response return value
6361  *
6362  * Returns non-zero value on error, zero on success.
6363  */
6364 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6365                 u8 tm_function, u8 *tm_response)
6366 {
6367         struct utp_task_req_desc treq = { { 0 }, };
6368         int ocs_value, err;
6369
6370         /* Configure task request descriptor */
6371         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6372         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6373
6374         /* Configure task request UPIU */
6375         treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6376                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6377         treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6378
6379         /*
6380          * The host shall provide the same value for LUN field in the basic
6381          * header and for Input Parameter.
6382          */
6383         treq.input_param1 = cpu_to_be32(lun_id);
6384         treq.input_param2 = cpu_to_be32(task_id);
6385
6386         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6387         if (err == -ETIMEDOUT)
6388                 return err;
6389
6390         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6391         if (ocs_value != OCS_SUCCESS)
6392                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6393                                 __func__, ocs_value);
6394         else if (tm_response)
6395                 *tm_response = be32_to_cpu(treq.output_param1) &
6396                                 MASK_TM_SERVICE_RESP;
6397         return err;
6398 }
6399
6400 /**
6401  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6402  * @hba:        per-adapter instance
6403  * @req_upiu:   upiu request
6404  * @rsp_upiu:   upiu reply
6405  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6406  * @buff_len:   descriptor size, 0 if NA
6407  * @cmd_type:   specifies the type (NOP, Query...)
6408  * @desc_op:    descriptor operation
6409  *
6410  * Those type of requests uses UTP Transfer Request Descriptor - utrd.
6411  * Therefore, it "rides" the device management infrastructure: uses its tag and
6412  * tasks work queues.
6413  *
6414  * Since there is only one available tag for device management commands,
6415  * the caller is expected to hold the hba->dev_cmd.lock mutex.
6416  */
6417 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6418                                         struct utp_upiu_req *req_upiu,
6419                                         struct utp_upiu_req *rsp_upiu,
6420                                         u8 *desc_buff, int *buff_len,
6421                                         enum dev_cmd_type cmd_type,
6422                                         enum query_opcode desc_op)
6423 {
6424         struct request_queue *q = hba->cmd_queue;
6425         struct request *req;
6426         struct ufshcd_lrb *lrbp;
6427         int err = 0;
6428         int tag;
6429         struct completion wait;
6430         unsigned long flags;
6431         u8 upiu_flags;
6432
6433         down_read(&hba->clk_scaling_lock);
6434
6435         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6436         if (IS_ERR(req)) {
6437                 err = PTR_ERR(req);
6438                 goto out_unlock;
6439         }
6440         tag = req->tag;
6441         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6442
6443         init_completion(&wait);
6444         lrbp = &hba->lrb[tag];
6445         if (unlikely(lrbp->in_use)) {
6446                 err = -EBUSY;
6447                 goto out;
6448         }
6449
6450         WARN_ON(lrbp->cmd);
6451         lrbp->cmd = NULL;
6452         lrbp->sense_bufflen = 0;
6453         lrbp->sense_buffer = NULL;
6454         lrbp->task_tag = tag;
6455         lrbp->lun = 0;
6456         lrbp->intr_cmd = true;
6457         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6458         hba->dev_cmd.type = cmd_type;
6459
6460         switch (hba->ufs_version) {
6461         case UFSHCI_VERSION_10:
6462         case UFSHCI_VERSION_11:
6463                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6464                 break;
6465         default:
6466                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6467                 break;
6468         }
6469
6470         /* update the task tag in the request upiu */
6471         req_upiu->header.dword_0 |= cpu_to_be32(tag);
6472
6473         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6474
6475         /* just copy the upiu request as it is */
6476         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6477         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6478                 /* The Data Segment Area is optional depending upon the query
6479                  * function value. for WRITE DESCRIPTOR, the data segment
6480                  * follows right after the tsf.
6481                  */
6482                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6483                 *buff_len = 0;
6484         }
6485
6486         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6487
6488         hba->dev_cmd.complete = &wait;
6489
6490         /* Make sure descriptors are ready before ringing the doorbell */
6491         wmb();
6492         spin_lock_irqsave(hba->host->host_lock, flags);
6493         ufshcd_send_command(hba, tag);
6494         spin_unlock_irqrestore(hba->host->host_lock, flags);
6495
6496         /*
6497          * ignore the returning value here - ufshcd_check_query_response is
6498          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6499          * read the response directly ignoring all errors.
6500          */
6501         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6502
6503         /* just copy the upiu response as it is */
6504         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6505         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6506                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6507                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6508                                MASK_QUERY_DATA_SEG_LEN;
6509
6510                 if (*buff_len >= resp_len) {
6511                         memcpy(desc_buff, descp, resp_len);
6512                         *buff_len = resp_len;
6513                 } else {
6514                         dev_warn(hba->dev,
6515                                  "%s: rsp size %d is bigger than buffer size %d",
6516                                  __func__, resp_len, *buff_len);
6517                         *buff_len = 0;
6518                         err = -EINVAL;
6519                 }
6520         }
6521
6522 out:
6523         blk_put_request(req);
6524 out_unlock:
6525         up_read(&hba->clk_scaling_lock);
6526         return err;
6527 }
6528
6529 /**
6530  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6531  * @hba:        per-adapter instance
6532  * @req_upiu:   upiu request
6533  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
6534  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
6535  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6536  * @buff_len:   descriptor size, 0 if NA
6537  * @desc_op:    descriptor operation
6538  *
6539  * Supports UTP Transfer requests (nop and query), and UTP Task
6540  * Management requests.
6541  * It is up to the caller to fill the upiu conent properly, as it will
6542  * be copied without any further input validations.
6543  */
6544 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6545                              struct utp_upiu_req *req_upiu,
6546                              struct utp_upiu_req *rsp_upiu,
6547                              int msgcode,
6548                              u8 *desc_buff, int *buff_len,
6549                              enum query_opcode desc_op)
6550 {
6551         int err;
6552         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6553         struct utp_task_req_desc treq = { { 0 }, };
6554         int ocs_value;
6555         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6556
6557         switch (msgcode) {
6558         case UPIU_TRANSACTION_NOP_OUT:
6559                 cmd_type = DEV_CMD_TYPE_NOP;
6560                 fallthrough;
6561         case UPIU_TRANSACTION_QUERY_REQ:
6562                 ufshcd_hold(hba, false);
6563                 mutex_lock(&hba->dev_cmd.lock);
6564                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6565                                                    desc_buff, buff_len,
6566                                                    cmd_type, desc_op);
6567                 mutex_unlock(&hba->dev_cmd.lock);
6568                 ufshcd_release(hba);
6569
6570                 break;
6571         case UPIU_TRANSACTION_TASK_REQ:
6572                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6573                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6574
6575                 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6576
6577                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6578                 if (err == -ETIMEDOUT)
6579                         break;
6580
6581                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6582                 if (ocs_value != OCS_SUCCESS) {
6583                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6584                                 ocs_value);
6585                         break;
6586                 }
6587
6588                 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6589
6590                 break;
6591         default:
6592                 err = -EINVAL;
6593
6594                 break;
6595         }
6596
6597         return err;
6598 }
6599
6600 /**
6601  * ufshcd_eh_device_reset_handler - device reset handler registered to
6602  *                                    scsi layer.
6603  * @cmd: SCSI command pointer
6604  *
6605  * Returns SUCCESS/FAILED
6606  */
6607 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6608 {
6609         struct Scsi_Host *host;
6610         struct ufs_hba *hba;
6611         unsigned int tag;
6612         u32 pos;
6613         int err;
6614         u8 resp = 0xF;
6615         struct ufshcd_lrb *lrbp;
6616         unsigned long flags;
6617
6618         host = cmd->device->host;
6619         hba = shost_priv(host);
6620         tag = cmd->request->tag;
6621
6622         lrbp = &hba->lrb[tag];
6623         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6624         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6625                 if (!err)
6626                         err = resp;
6627                 goto out;
6628         }
6629
6630         /* clear the commands that were pending for corresponding LUN */
6631         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6632                 if (hba->lrb[pos].lun == lrbp->lun) {
6633                         err = ufshcd_clear_cmd(hba, pos);
6634                         if (err)
6635                                 break;
6636                 }
6637         }
6638         spin_lock_irqsave(host->host_lock, flags);
6639         ufshcd_transfer_req_compl(hba);
6640         spin_unlock_irqrestore(host->host_lock, flags);
6641
6642 out:
6643         hba->req_abort_count = 0;
6644         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
6645         if (!err) {
6646                 err = SUCCESS;
6647         } else {
6648                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6649                 err = FAILED;
6650         }
6651         return err;
6652 }
6653
6654 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6655 {
6656         struct ufshcd_lrb *lrbp;
6657         int tag;
6658
6659         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6660                 lrbp = &hba->lrb[tag];
6661                 lrbp->req_abort_skip = true;
6662         }
6663 }
6664
6665 /**
6666  * ufshcd_try_to_abort_task - abort a specific task
6667  * @hba: Pointer to adapter instance
6668  * @tag: Task tag/index to be aborted
6669  *
6670  * Abort the pending command in device by sending UFS_ABORT_TASK task management
6671  * command, and in host controller by clearing the door-bell register. There can
6672  * be race between controller sending the command to the device while abort is
6673  * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
6674  * really issued and then try to abort it.
6675  *
6676  * Returns zero on success, non-zero on failure
6677  */
6678 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6679 {
6680         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6681         int err = 0;
6682         int poll_cnt;
6683         u8 resp = 0xF;
6684         u32 reg;
6685
6686         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6687                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6688                                 UFS_QUERY_TASK, &resp);
6689                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6690                         /* cmd pending in the device */
6691                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6692                                 __func__, tag);
6693                         break;
6694                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6695                         /*
6696                          * cmd not pending in the device, check if it is
6697                          * in transition.
6698                          */
6699                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6700                                 __func__, tag);
6701                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6702                         if (reg & (1 << tag)) {
6703                                 /* sleep for max. 200us to stabilize */
6704                                 usleep_range(100, 200);
6705                                 continue;
6706                         }
6707                         /* command completed already */
6708                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6709                                 __func__, tag);
6710                         goto out;
6711                 } else {
6712                         dev_err(hba->dev,
6713                                 "%s: no response from device. tag = %d, err %d\n",
6714                                 __func__, tag, err);
6715                         if (!err)
6716                                 err = resp; /* service response error */
6717                         goto out;
6718                 }
6719         }
6720
6721         if (!poll_cnt) {
6722                 err = -EBUSY;
6723                 goto out;
6724         }
6725
6726         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6727                         UFS_ABORT_TASK, &resp);
6728         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6729                 if (!err) {
6730                         err = resp; /* service response error */
6731                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6732                                 __func__, tag, err);
6733                 }
6734                 goto out;
6735         }
6736
6737         err = ufshcd_clear_cmd(hba, tag);
6738         if (err)
6739                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6740                         __func__, tag, err);
6741
6742 out:
6743         return err;
6744 }
6745
6746 /**
6747  * ufshcd_abort - scsi host template eh_abort_handler callback
6748  * @cmd: SCSI command pointer
6749  *
6750  * Returns SUCCESS/FAILED
6751  */
6752 static int ufshcd_abort(struct scsi_cmnd *cmd)
6753 {
6754         struct Scsi_Host *host;
6755         struct ufs_hba *hba;
6756         unsigned long flags;
6757         unsigned int tag;
6758         int err = 0;
6759         struct ufshcd_lrb *lrbp;
6760         u32 reg;
6761
6762         host = cmd->device->host;
6763         hba = shost_priv(host);
6764         tag = cmd->request->tag;
6765         lrbp = &hba->lrb[tag];
6766         if (!ufshcd_valid_tag(hba, tag)) {
6767                 dev_err(hba->dev,
6768                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6769                         __func__, tag, cmd, cmd->request);
6770                 BUG();
6771         }
6772
6773         ufshcd_hold(hba, false);
6774         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6775         /* If command is already aborted/completed, return SUCCESS */
6776         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6777                 dev_err(hba->dev,
6778                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6779                         __func__, tag, hba->outstanding_reqs, reg);
6780                 goto out;
6781         }
6782
6783         /* Print Transfer Request of aborted task */
6784         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6785
6786         /*
6787          * Print detailed info about aborted request.
6788          * As more than one request might get aborted at the same time,
6789          * print full information only for the first aborted request in order
6790          * to reduce repeated printouts. For other aborted requests only print
6791          * basic details.
6792          */
6793         scsi_print_command(cmd);
6794         if (!hba->req_abort_count) {
6795                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
6796                 ufshcd_print_evt_hist(hba);
6797                 ufshcd_print_host_state(hba);
6798                 ufshcd_print_pwr_info(hba);
6799                 ufshcd_print_trs(hba, 1 << tag, true);
6800         } else {
6801                 ufshcd_print_trs(hba, 1 << tag, false);
6802         }
6803         hba->req_abort_count++;
6804
6805         if (!(reg & (1 << tag))) {
6806                 dev_err(hba->dev,
6807                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6808                 __func__, tag);
6809                 goto cleanup;
6810         }
6811
6812         /*
6813          * Task abort to the device W-LUN is illegal. When this command
6814          * will fail, due to spec violation, scsi err handling next step
6815          * will be to send LU reset which, again, is a spec violation.
6816          * To avoid these unnecessary/illegal steps, first we clean up
6817          * the lrb taken by this cmd and mark the lrb as in_use, then
6818          * queue the eh_work and bail.
6819          */
6820         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
6821                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
6822                 spin_lock_irqsave(host->host_lock, flags);
6823                 if (lrbp->cmd) {
6824                         __ufshcd_transfer_req_compl(hba, (1UL << tag));
6825                         __set_bit(tag, &hba->outstanding_reqs);
6826                         lrbp->in_use = true;
6827                         hba->force_reset = true;
6828                         ufshcd_schedule_eh_work(hba);
6829                 }
6830
6831                 spin_unlock_irqrestore(host->host_lock, flags);
6832                 goto out;
6833         }
6834
6835         /* Skip task abort in case previous aborts failed and report failure */
6836         if (lrbp->req_abort_skip)
6837                 err = -EIO;
6838         else
6839                 err = ufshcd_try_to_abort_task(hba, tag);
6840
6841         if (!err) {
6842 cleanup:
6843                 spin_lock_irqsave(host->host_lock, flags);
6844                 __ufshcd_transfer_req_compl(hba, (1UL << tag));
6845                 spin_unlock_irqrestore(host->host_lock, flags);
6846 out:
6847                 err = SUCCESS;
6848         } else {
6849                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6850                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6851                 err = FAILED;
6852         }
6853
6854         /*
6855          * This ufshcd_release() corresponds to the original scsi cmd that got
6856          * aborted here (as we won't get any IRQ for it).
6857          */
6858         ufshcd_release(hba);
6859         return err;
6860 }
6861
6862 /**
6863  * ufshcd_host_reset_and_restore - reset and restore host controller
6864  * @hba: per-adapter instance
6865  *
6866  * Note that host controller reset may issue DME_RESET to
6867  * local and remote (device) Uni-Pro stack and the attributes
6868  * are reset to default state.
6869  *
6870  * Returns zero on success, non-zero on failure
6871  */
6872 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6873 {
6874         int err;
6875         unsigned long flags;
6876
6877         /*
6878          * Stop the host controller and complete the requests
6879          * cleared by h/w
6880          */
6881         ufshcd_hba_stop(hba);
6882
6883         spin_lock_irqsave(hba->host->host_lock, flags);
6884         hba->silence_err_logs = true;
6885         ufshcd_complete_requests(hba);
6886         hba->silence_err_logs = false;
6887         spin_unlock_irqrestore(hba->host->host_lock, flags);
6888
6889         /* scale up clocks to max frequency before full reinitialization */
6890         ufshcd_set_clk_freq(hba, true);
6891
6892         err = ufshcd_hba_enable(hba);
6893         if (err)
6894                 goto out;
6895
6896         /* Establish the link again and restore the device */
6897         err = ufshcd_probe_hba(hba, false);
6898
6899 out:
6900         if (err)
6901                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6902         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
6903         return err;
6904 }
6905
6906 /**
6907  * ufshcd_reset_and_restore - reset and re-initialize host/device
6908  * @hba: per-adapter instance
6909  *
6910  * Reset and recover device, host and re-establish link. This
6911  * is helpful to recover the communication in fatal error conditions.
6912  *
6913  * Returns zero on success, non-zero on failure
6914  */
6915 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6916 {
6917         u32 saved_err;
6918         u32 saved_uic_err;
6919         int err = 0;
6920         unsigned long flags;
6921         int retries = MAX_HOST_RESET_RETRIES;
6922
6923         /*
6924          * This is a fresh start, cache and clear saved error first,
6925          * in case new error generated during reset and restore.
6926          */
6927         spin_lock_irqsave(hba->host->host_lock, flags);
6928         saved_err = hba->saved_err;
6929         saved_uic_err = hba->saved_uic_err;
6930         hba->saved_err = 0;
6931         hba->saved_uic_err = 0;
6932         spin_unlock_irqrestore(hba->host->host_lock, flags);
6933
6934         do {
6935                 /* Reset the attached device */
6936                 ufshcd_vops_device_reset(hba);
6937
6938                 err = ufshcd_host_reset_and_restore(hba);
6939         } while (err && --retries);
6940
6941         spin_lock_irqsave(hba->host->host_lock, flags);
6942         /*
6943          * Inform scsi mid-layer that we did reset and allow to handle
6944          * Unit Attention properly.
6945          */
6946         scsi_report_bus_reset(hba->host, 0);
6947         if (err) {
6948                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6949                 hba->saved_err |= saved_err;
6950                 hba->saved_uic_err |= saved_uic_err;
6951         }
6952         spin_unlock_irqrestore(hba->host->host_lock, flags);
6953
6954         return err;
6955 }
6956
6957 /**
6958  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6959  * @cmd: SCSI command pointer
6960  *
6961  * Returns SUCCESS/FAILED
6962  */
6963 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6964 {
6965         int err = SUCCESS;
6966         unsigned long flags;
6967         struct ufs_hba *hba;
6968
6969         hba = shost_priv(cmd->device->host);
6970
6971         spin_lock_irqsave(hba->host->host_lock, flags);
6972         hba->force_reset = true;
6973         ufshcd_schedule_eh_work(hba);
6974         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
6975         spin_unlock_irqrestore(hba->host->host_lock, flags);
6976
6977         flush_work(&hba->eh_work);
6978
6979         spin_lock_irqsave(hba->host->host_lock, flags);
6980         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
6981                 err = FAILED;
6982         spin_unlock_irqrestore(hba->host->host_lock, flags);
6983
6984         return err;
6985 }
6986
6987 /**
6988  * ufshcd_get_max_icc_level - calculate the ICC level
6989  * @sup_curr_uA: max. current supported by the regulator
6990  * @start_scan: row at the desc table to start scan from
6991  * @buff: power descriptor buffer
6992  *
6993  * Returns calculated max ICC level for specific regulator
6994  */
6995 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6996 {
6997         int i;
6998         int curr_uA;
6999         u16 data;
7000         u16 unit;
7001
7002         for (i = start_scan; i >= 0; i--) {
7003                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
7004                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7005                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7006                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7007                 switch (unit) {
7008                 case UFSHCD_NANO_AMP:
7009                         curr_uA = curr_uA / 1000;
7010                         break;
7011                 case UFSHCD_MILI_AMP:
7012                         curr_uA = curr_uA * 1000;
7013                         break;
7014                 case UFSHCD_AMP:
7015                         curr_uA = curr_uA * 1000 * 1000;
7016                         break;
7017                 case UFSHCD_MICRO_AMP:
7018                 default:
7019                         break;
7020                 }
7021                 if (sup_curr_uA >= curr_uA)
7022                         break;
7023         }
7024         if (i < 0) {
7025                 i = 0;
7026                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7027         }
7028
7029         return (u32)i;
7030 }
7031
7032 /**
7033  * ufshcd_calc_icc_level - calculate the max ICC level
7034  * In case regulators are not initialized we'll return 0
7035  * @hba: per-adapter instance
7036  * @desc_buf: power descriptor buffer to extract ICC levels from.
7037  * @len: length of desc_buff
7038  *
7039  * Returns calculated ICC level
7040  */
7041 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7042                                                         u8 *desc_buf, int len)
7043 {
7044         u32 icc_level = 0;
7045
7046         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7047                                                 !hba->vreg_info.vccq2) {
7048                 dev_err(hba->dev,
7049                         "%s: Regulator capability was not set, actvIccLevel=%d",
7050                                                         __func__, icc_level);
7051                 goto out;
7052         }
7053
7054         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
7055                 icc_level = ufshcd_get_max_icc_level(
7056                                 hba->vreg_info.vcc->max_uA,
7057                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7058                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7059
7060         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
7061                 icc_level = ufshcd_get_max_icc_level(
7062                                 hba->vreg_info.vccq->max_uA,
7063                                 icc_level,
7064                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7065
7066         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7067                 icc_level = ufshcd_get_max_icc_level(
7068                                 hba->vreg_info.vccq2->max_uA,
7069                                 icc_level,
7070                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7071 out:
7072         return icc_level;
7073 }
7074
7075 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7076 {
7077         int ret;
7078         int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7079         u8 *desc_buf;
7080         u32 icc_level;
7081
7082         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7083         if (!desc_buf)
7084                 return;
7085
7086         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7087                                      desc_buf, buff_len);
7088         if (ret) {
7089                 dev_err(hba->dev,
7090                         "%s: Failed reading power descriptor.len = %d ret = %d",
7091                         __func__, buff_len, ret);
7092                 goto out;
7093         }
7094
7095         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7096                                                          buff_len);
7097         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7098
7099         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7100                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7101
7102         if (ret)
7103                 dev_err(hba->dev,
7104                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7105                         __func__, icc_level, ret);
7106
7107 out:
7108         kfree(desc_buf);
7109 }
7110
7111 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7112 {
7113         scsi_autopm_get_device(sdev);
7114         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7115         if (sdev->rpm_autosuspend)
7116                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7117                                                  RPM_AUTOSUSPEND_DELAY_MS);
7118         scsi_autopm_put_device(sdev);
7119 }
7120
7121 /**
7122  * ufshcd_scsi_add_wlus - Adds required W-LUs
7123  * @hba: per-adapter instance
7124  *
7125  * UFS device specification requires the UFS devices to support 4 well known
7126  * logical units:
7127  *      "REPORT_LUNS" (address: 01h)
7128  *      "UFS Device" (address: 50h)
7129  *      "RPMB" (address: 44h)
7130  *      "BOOT" (address: 30h)
7131  * UFS device's power management needs to be controlled by "POWER CONDITION"
7132  * field of SSU (START STOP UNIT) command. But this "power condition" field
7133  * will take effect only when its sent to "UFS device" well known logical unit
7134  * hence we require the scsi_device instance to represent this logical unit in
7135  * order for the UFS host driver to send the SSU command for power management.
7136  *
7137  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7138  * Block) LU so user space process can control this LU. User space may also
7139  * want to have access to BOOT LU.
7140  *
7141  * This function adds scsi device instances for each of all well known LUs
7142  * (except "REPORT LUNS" LU).
7143  *
7144  * Returns zero on success (all required W-LUs are added successfully),
7145  * non-zero error value on failure (if failed to add any of the required W-LU).
7146  */
7147 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7148 {
7149         int ret = 0;
7150         struct scsi_device *sdev_boot;
7151
7152         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7153                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7154         if (IS_ERR(hba->sdev_ufs_device)) {
7155                 ret = PTR_ERR(hba->sdev_ufs_device);
7156                 hba->sdev_ufs_device = NULL;
7157                 goto out;
7158         }
7159         ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
7160         scsi_device_put(hba->sdev_ufs_device);
7161
7162         hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7163                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7164         if (IS_ERR(hba->sdev_rpmb)) {
7165                 ret = PTR_ERR(hba->sdev_rpmb);
7166                 goto remove_sdev_ufs_device;
7167         }
7168         ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7169         scsi_device_put(hba->sdev_rpmb);
7170
7171         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7172                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7173         if (IS_ERR(sdev_boot)) {
7174                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7175         } else {
7176                 ufshcd_blk_pm_runtime_init(sdev_boot);
7177                 scsi_device_put(sdev_boot);
7178         }
7179         goto out;
7180
7181 remove_sdev_ufs_device:
7182         scsi_remove_device(hba->sdev_ufs_device);
7183 out:
7184         return ret;
7185 }
7186
7187 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7188 {
7189         struct ufs_dev_info *dev_info = &hba->dev_info;
7190         u8 lun;
7191         u32 d_lu_wb_buf_alloc;
7192
7193         if (!ufshcd_is_wb_allowed(hba))
7194                 return;
7195         /*
7196          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7197          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7198          * enabled
7199          */
7200         if (!(dev_info->wspecversion >= 0x310 ||
7201               dev_info->wspecversion == 0x220 ||
7202              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7203                 goto wb_disabled;
7204
7205         if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7206             DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7207                 goto wb_disabled;
7208
7209         dev_info->d_ext_ufs_feature_sup =
7210                 get_unaligned_be32(desc_buf +
7211                                    DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7212
7213         if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
7214                 goto wb_disabled;
7215
7216         /*
7217          * WB may be supported but not configured while provisioning.
7218          * The spec says, in dedicated wb buffer mode,
7219          * a max of 1 lun would have wb buffer configured.
7220          * Now only shared buffer mode is supported.
7221          */
7222         dev_info->b_wb_buffer_type =
7223                 desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7224
7225         dev_info->b_presrv_uspc_en =
7226                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7227
7228         if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
7229                 dev_info->d_wb_alloc_units =
7230                 get_unaligned_be32(desc_buf +
7231                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
7232                 if (!dev_info->d_wb_alloc_units)
7233                         goto wb_disabled;
7234         } else {
7235                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7236                         d_lu_wb_buf_alloc = 0;
7237                         ufshcd_read_unit_desc_param(hba,
7238                                         lun,
7239                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7240                                         (u8 *)&d_lu_wb_buf_alloc,
7241                                         sizeof(d_lu_wb_buf_alloc));
7242                         if (d_lu_wb_buf_alloc) {
7243                                 dev_info->wb_dedicated_lu = lun;
7244                                 break;
7245                         }
7246                 }
7247
7248                 if (!d_lu_wb_buf_alloc)
7249                         goto wb_disabled;
7250         }
7251         return;
7252
7253 wb_disabled:
7254         hba->caps &= ~UFSHCD_CAP_WB_EN;
7255 }
7256
7257 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7258 {
7259         struct ufs_dev_fix *f;
7260         struct ufs_dev_info *dev_info = &hba->dev_info;
7261
7262         if (!fixups)
7263                 return;
7264
7265         for (f = fixups; f->quirk; f++) {
7266                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7267                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
7268                      ((dev_info->model &&
7269                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7270                       !strcmp(f->model, UFS_ANY_MODEL)))
7271                         hba->dev_quirks |= f->quirk;
7272         }
7273 }
7274 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7275
7276 static void ufs_fixup_device_setup(struct ufs_hba *hba)
7277 {
7278         /* fix by general quirk table */
7279         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7280
7281         /* allow vendors to fix quirks */
7282         ufshcd_vops_fixup_dev_quirks(hba);
7283 }
7284
7285 static int ufs_get_device_desc(struct ufs_hba *hba)
7286 {
7287         int err;
7288         u8 model_index;
7289         u8 *desc_buf;
7290         struct ufs_dev_info *dev_info = &hba->dev_info;
7291
7292         desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7293         if (!desc_buf) {
7294                 err = -ENOMEM;
7295                 goto out;
7296         }
7297
7298         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7299                                      hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7300         if (err) {
7301                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7302                         __func__, err);
7303                 goto out;
7304         }
7305
7306         /*
7307          * getting vendor (manufacturerID) and Bank Index in big endian
7308          * format
7309          */
7310         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7311                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7312
7313         /* getting Specification Version in big endian format */
7314         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7315                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7316
7317         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7318
7319         err = ufshcd_read_string_desc(hba, model_index,
7320                                       &dev_info->model, SD_ASCII_STD);
7321         if (err < 0) {
7322                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7323                         __func__, err);
7324                 goto out;
7325         }
7326
7327         ufs_fixup_device_setup(hba);
7328
7329         ufshcd_wb_probe(hba, desc_buf);
7330
7331         /*
7332          * ufshcd_read_string_desc returns size of the string
7333          * reset the error value
7334          */
7335         err = 0;
7336
7337 out:
7338         kfree(desc_buf);
7339         return err;
7340 }
7341
7342 static void ufs_put_device_desc(struct ufs_hba *hba)
7343 {
7344         struct ufs_dev_info *dev_info = &hba->dev_info;
7345
7346         kfree(dev_info->model);
7347         dev_info->model = NULL;
7348 }
7349
7350 /**
7351  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7352  * @hba: per-adapter instance
7353  *
7354  * PA_TActivate parameter can be tuned manually if UniPro version is less than
7355  * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
7356  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7357  * the hibern8 exit latency.
7358  *
7359  * Returns zero on success, non-zero error value on failure.
7360  */
7361 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7362 {
7363         int ret = 0;
7364         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7365
7366         ret = ufshcd_dme_peer_get(hba,
7367                                   UIC_ARG_MIB_SEL(
7368                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7369                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7370                                   &peer_rx_min_activatetime);
7371         if (ret)
7372                 goto out;
7373
7374         /* make sure proper unit conversion is applied */
7375         tuned_pa_tactivate =
7376                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7377                  / PA_TACTIVATE_TIME_UNIT_US);
7378         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7379                              tuned_pa_tactivate);
7380
7381 out:
7382         return ret;
7383 }
7384
7385 /**
7386  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7387  * @hba: per-adapter instance
7388  *
7389  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7390  * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
7391  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7392  * This optimal value can help reduce the hibern8 exit latency.
7393  *
7394  * Returns zero on success, non-zero error value on failure.
7395  */
7396 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7397 {
7398         int ret = 0;
7399         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7400         u32 max_hibern8_time, tuned_pa_hibern8time;
7401
7402         ret = ufshcd_dme_get(hba,
7403                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7404                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7405                                   &local_tx_hibern8_time_cap);
7406         if (ret)
7407                 goto out;
7408
7409         ret = ufshcd_dme_peer_get(hba,
7410                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7411                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7412                                   &peer_rx_hibern8_time_cap);
7413         if (ret)
7414                 goto out;
7415
7416         max_hibern8_time = max(local_tx_hibern8_time_cap,
7417                                peer_rx_hibern8_time_cap);
7418         /* make sure proper unit conversion is applied */
7419         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7420                                 / PA_HIBERN8_TIME_UNIT_US);
7421         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7422                              tuned_pa_hibern8time);
7423 out:
7424         return ret;
7425 }
7426
7427 /**
7428  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7429  * less than device PA_TACTIVATE time.
7430  * @hba: per-adapter instance
7431  *
7432  * Some UFS devices require host PA_TACTIVATE to be lower than device
7433  * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7434  * for such devices.
7435  *
7436  * Returns zero on success, non-zero error value on failure.
7437  */
7438 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7439 {
7440         int ret = 0;
7441         u32 granularity, peer_granularity;
7442         u32 pa_tactivate, peer_pa_tactivate;
7443         u32 pa_tactivate_us, peer_pa_tactivate_us;
7444         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7445
7446         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7447                                   &granularity);
7448         if (ret)
7449                 goto out;
7450
7451         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7452                                   &peer_granularity);
7453         if (ret)
7454                 goto out;
7455
7456         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7457             (granularity > PA_GRANULARITY_MAX_VAL)) {
7458                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7459                         __func__, granularity);
7460                 return -EINVAL;
7461         }
7462
7463         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7464             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7465                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7466                         __func__, peer_granularity);
7467                 return -EINVAL;
7468         }
7469
7470         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7471         if (ret)
7472                 goto out;
7473
7474         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7475                                   &peer_pa_tactivate);
7476         if (ret)
7477                 goto out;
7478
7479         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7480         peer_pa_tactivate_us = peer_pa_tactivate *
7481                              gran_to_us_table[peer_granularity - 1];
7482
7483         if (pa_tactivate_us > peer_pa_tactivate_us) {
7484                 u32 new_peer_pa_tactivate;
7485
7486                 new_peer_pa_tactivate = pa_tactivate_us /
7487                                       gran_to_us_table[peer_granularity - 1];
7488                 new_peer_pa_tactivate++;
7489                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7490                                           new_peer_pa_tactivate);
7491         }
7492
7493 out:
7494         return ret;
7495 }
7496
7497 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7498 {
7499         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7500                 ufshcd_tune_pa_tactivate(hba);
7501                 ufshcd_tune_pa_hibern8time(hba);
7502         }
7503
7504         ufshcd_vops_apply_dev_quirks(hba);
7505
7506         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7507                 /* set 1ms timeout for PA_TACTIVATE */
7508                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7509
7510         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7511                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7512 }
7513
7514 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7515 {
7516         hba->ufs_stats.hibern8_exit_cnt = 0;
7517         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7518         hba->req_abort_count = 0;
7519 }
7520
7521 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7522 {
7523         int err;
7524         size_t buff_len;
7525         u8 *desc_buf;
7526
7527         buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7528         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7529         if (!desc_buf) {
7530                 err = -ENOMEM;
7531                 goto out;
7532         }
7533
7534         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7535                                      desc_buf, buff_len);
7536         if (err) {
7537                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7538                                 __func__, err);
7539                 goto out;
7540         }
7541
7542         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7543                 hba->dev_info.max_lu_supported = 32;
7544         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7545                 hba->dev_info.max_lu_supported = 8;
7546
7547 out:
7548         kfree(desc_buf);
7549         return err;
7550 }
7551
7552 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7553         {19200000, REF_CLK_FREQ_19_2_MHZ},
7554         {26000000, REF_CLK_FREQ_26_MHZ},
7555         {38400000, REF_CLK_FREQ_38_4_MHZ},
7556         {52000000, REF_CLK_FREQ_52_MHZ},
7557         {0, REF_CLK_FREQ_INVAL},
7558 };
7559
7560 static enum ufs_ref_clk_freq
7561 ufs_get_bref_clk_from_hz(unsigned long freq)
7562 {
7563         int i;
7564
7565         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7566                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7567                         return ufs_ref_clk_freqs[i].val;
7568
7569         return REF_CLK_FREQ_INVAL;
7570 }
7571
7572 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7573 {
7574         unsigned long freq;
7575
7576         freq = clk_get_rate(refclk);
7577
7578         hba->dev_ref_clk_freq =
7579                 ufs_get_bref_clk_from_hz(freq);
7580
7581         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7582                 dev_err(hba->dev,
7583                 "invalid ref_clk setting = %ld\n", freq);
7584 }
7585
7586 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7587 {
7588         int err;
7589         u32 ref_clk;
7590         u32 freq = hba->dev_ref_clk_freq;
7591
7592         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7593                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7594
7595         if (err) {
7596                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7597                         err);
7598                 goto out;
7599         }
7600
7601         if (ref_clk == freq)
7602                 goto out; /* nothing to update */
7603
7604         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7605                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7606
7607         if (err) {
7608                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7609                         ufs_ref_clk_freqs[freq].freq_hz);
7610                 goto out;
7611         }
7612
7613         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7614                         ufs_ref_clk_freqs[freq].freq_hz);
7615
7616 out:
7617         return err;
7618 }
7619
7620 static int ufshcd_device_params_init(struct ufs_hba *hba)
7621 {
7622         bool flag;
7623         int ret, i;
7624
7625          /* Init device descriptor sizes */
7626         for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7627                 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7628
7629         /* Init UFS geometry descriptor related parameters */
7630         ret = ufshcd_device_geo_params_init(hba);
7631         if (ret)
7632                 goto out;
7633
7634         /* Check and apply UFS device quirks */
7635         ret = ufs_get_device_desc(hba);
7636         if (ret) {
7637                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7638                         __func__, ret);
7639                 goto out;
7640         }
7641
7642         ufshcd_get_ref_clk_gating_wait(hba);
7643
7644         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7645                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7646                 hba->dev_info.f_power_on_wp_en = flag;
7647
7648         /* Probe maximum power mode co-supported by both UFS host and device */
7649         if (ufshcd_get_max_pwr_mode(hba))
7650                 dev_err(hba->dev,
7651                         "%s: Failed getting max supported power mode\n",
7652                         __func__);
7653 out:
7654         return ret;
7655 }
7656
7657 /**
7658  * ufshcd_add_lus - probe and add UFS logical units
7659  * @hba: per-adapter instance
7660  */
7661 static int ufshcd_add_lus(struct ufs_hba *hba)
7662 {
7663         int ret;
7664
7665         /* Add required well known logical units to scsi mid layer */
7666         ret = ufshcd_scsi_add_wlus(hba);
7667         if (ret)
7668                 goto out;
7669
7670         /* Initialize devfreq after UFS device is detected */
7671         if (ufshcd_is_clkscaling_supported(hba)) {
7672                 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7673                         &hba->pwr_info,
7674                         sizeof(struct ufs_pa_layer_attr));
7675                 hba->clk_scaling.saved_pwr_info.is_valid = true;
7676                 if (!hba->devfreq) {
7677                         ret = ufshcd_devfreq_init(hba);
7678                         if (ret)
7679                                 goto out;
7680                 }
7681
7682                 hba->clk_scaling.is_allowed = true;
7683         }
7684
7685         ufs_bsg_probe(hba);
7686         scsi_scan_host(hba->host);
7687         pm_runtime_put_sync(hba->dev);
7688
7689 out:
7690         return ret;
7691 }
7692
7693 static int
7694 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
7695
7696 static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
7697 {
7698         struct scsi_device *sdp;
7699         unsigned long flags;
7700         int ret = 0;
7701
7702         spin_lock_irqsave(hba->host->host_lock, flags);
7703         if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
7704                 sdp = hba->sdev_ufs_device;
7705         else if (wlun == UFS_UPIU_RPMB_WLUN)
7706                 sdp = hba->sdev_rpmb;
7707         else
7708                 BUG();
7709         if (sdp) {
7710                 ret = scsi_device_get(sdp);
7711                 if (!ret && !scsi_device_online(sdp)) {
7712                         ret = -ENODEV;
7713                         scsi_device_put(sdp);
7714                 }
7715         } else {
7716                 ret = -ENODEV;
7717         }
7718         spin_unlock_irqrestore(hba->host->host_lock, flags);
7719         if (ret)
7720                 goto out_err;
7721
7722         ret = ufshcd_send_request_sense(hba, sdp);
7723         scsi_device_put(sdp);
7724 out_err:
7725         if (ret)
7726                 dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
7727                                 __func__, wlun, ret);
7728         return ret;
7729 }
7730
7731 static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
7732 {
7733         int ret = 0;
7734
7735         if (!hba->wlun_dev_clr_ua)
7736                 goto out;
7737
7738         ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
7739         if (!ret)
7740                 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
7741         if (!ret)
7742                 hba->wlun_dev_clr_ua = false;
7743 out:
7744         if (ret)
7745                 dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
7746                                 __func__, ret);
7747         return ret;
7748 }
7749
7750 /**
7751  * ufshcd_probe_hba - probe hba to detect device and initialize
7752  * @hba: per-adapter instance
7753  * @async: asynchronous execution or not
7754  *
7755  * Execute link-startup and verify device initialization
7756  */
7757 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7758 {
7759         int ret;
7760         unsigned long flags;
7761         ktime_t start = ktime_get();
7762
7763         ret = ufshcd_link_startup(hba);
7764         if (ret)
7765                 goto out;
7766
7767         /* Debug counters initialization */
7768         ufshcd_clear_dbg_ufs_stats(hba);
7769
7770         /* UniPro link is active now */
7771         ufshcd_set_link_active(hba);
7772
7773         /* Verify device initialization by sending NOP OUT UPIU */
7774         ret = ufshcd_verify_dev_init(hba);
7775         if (ret)
7776                 goto out;
7777
7778         /* Initiate UFS initialization, and waiting until completion */
7779         ret = ufshcd_complete_dev_init(hba);
7780         if (ret)
7781                 goto out;
7782
7783         /*
7784          * Initialize UFS device parameters used by driver, these
7785          * parameters are associated with UFS descriptors.
7786          */
7787         if (async) {
7788                 ret = ufshcd_device_params_init(hba);
7789                 if (ret)
7790                         goto out;
7791         }
7792
7793         ufshcd_tune_unipro_params(hba);
7794
7795         /* UFS device is also active now */
7796         ufshcd_set_ufs_dev_active(hba);
7797         ufshcd_force_reset_auto_bkops(hba);
7798         hba->wlun_dev_clr_ua = true;
7799
7800         /* Gear up to HS gear if supported */
7801         if (hba->max_pwr_info.is_valid) {
7802                 /*
7803                  * Set the right value to bRefClkFreq before attempting to
7804                  * switch to HS gears.
7805                  */
7806                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7807                         ufshcd_set_dev_ref_clk(hba);
7808                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7809                 if (ret) {
7810                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7811                                         __func__, ret);
7812                         goto out;
7813                 }
7814                 ufshcd_print_pwr_info(hba);
7815         }
7816
7817         /*
7818          * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7819          * and for removable UFS card as well, hence always set the parameter.
7820          * Note: Error handler may issue the device reset hence resetting
7821          * bActiveICCLevel as well so it is always safe to set this here.
7822          */
7823         ufshcd_set_active_icc_lvl(hba);
7824
7825         ufshcd_wb_config(hba);
7826         /* Enable Auto-Hibernate if configured */
7827         ufshcd_auto_hibern8_enable(hba);
7828
7829 out:
7830         spin_lock_irqsave(hba->host->host_lock, flags);
7831         if (ret)
7832                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7833         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7834                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7835         spin_unlock_irqrestore(hba->host->host_lock, flags);
7836
7837         trace_ufshcd_init(dev_name(hba->dev), ret,
7838                 ktime_to_us(ktime_sub(ktime_get(), start)),
7839                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7840         return ret;
7841 }
7842
7843 /**
7844  * ufshcd_async_scan - asynchronous execution for probing hba
7845  * @data: data pointer to pass to this function
7846  * @cookie: cookie data
7847  */
7848 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7849 {
7850         struct ufs_hba *hba = (struct ufs_hba *)data;
7851         int ret;
7852
7853         down(&hba->eh_sem);
7854         /* Initialize hba, detect and initialize UFS device */
7855         ret = ufshcd_probe_hba(hba, true);
7856         up(&hba->eh_sem);
7857         if (ret)
7858                 goto out;
7859
7860         /* Probe and add UFS logical units  */
7861         ret = ufshcd_add_lus(hba);
7862 out:
7863         /*
7864          * If we failed to initialize the device or the device is not
7865          * present, turn off the power/clocks etc.
7866          */
7867         if (ret) {
7868                 pm_runtime_put_sync(hba->dev);
7869                 ufshcd_exit_clk_scaling(hba);
7870                 ufshcd_hba_exit(hba);
7871         } else {
7872                 ufshcd_clear_ua_wluns(hba);
7873         }
7874 }
7875
7876 static const struct attribute_group *ufshcd_driver_groups[] = {
7877         &ufs_sysfs_unit_descriptor_group,
7878         &ufs_sysfs_lun_attributes_group,
7879         NULL,
7880 };
7881
7882 static struct ufs_hba_variant_params ufs_hba_vps = {
7883         .hba_enable_delay_us            = 1000,
7884         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
7885         .devfreq_profile.polling_ms     = 100,
7886         .devfreq_profile.target         = ufshcd_devfreq_target,
7887         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
7888         .ondemand_data.upthreshold      = 70,
7889         .ondemand_data.downdifferential = 5,
7890 };
7891
7892 static struct scsi_host_template ufshcd_driver_template = {
7893         .module                 = THIS_MODULE,
7894         .name                   = UFSHCD,
7895         .proc_name              = UFSHCD,
7896         .queuecommand           = ufshcd_queuecommand,
7897         .slave_alloc            = ufshcd_slave_alloc,
7898         .slave_configure        = ufshcd_slave_configure,
7899         .slave_destroy          = ufshcd_slave_destroy,
7900         .change_queue_depth     = ufshcd_change_queue_depth,
7901         .eh_abort_handler       = ufshcd_abort,
7902         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7903         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7904         .this_id                = -1,
7905         .sg_tablesize           = SG_ALL,
7906         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7907         .can_queue              = UFSHCD_CAN_QUEUE,
7908         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
7909         .max_host_blocked       = 1,
7910         .track_queue_depth      = 1,
7911         .sdev_groups            = ufshcd_driver_groups,
7912         .dma_boundary           = PAGE_SIZE - 1,
7913         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
7914 };
7915
7916 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7917                                    int ua)
7918 {
7919         int ret;
7920
7921         if (!vreg)
7922                 return 0;
7923
7924         /*
7925          * "set_load" operation shall be required on those regulators
7926          * which specifically configured current limitation. Otherwise
7927          * zero max_uA may cause unexpected behavior when regulator is
7928          * enabled or set as high power mode.
7929          */
7930         if (!vreg->max_uA)
7931                 return 0;
7932
7933         ret = regulator_set_load(vreg->reg, ua);
7934         if (ret < 0) {
7935                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7936                                 __func__, vreg->name, ua, ret);
7937         }
7938
7939         return ret;
7940 }
7941
7942 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7943                                          struct ufs_vreg *vreg)
7944 {
7945         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7946 }
7947
7948 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7949                                          struct ufs_vreg *vreg)
7950 {
7951         if (!vreg)
7952                 return 0;
7953
7954         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7955 }
7956
7957 static int ufshcd_config_vreg(struct device *dev,
7958                 struct ufs_vreg *vreg, bool on)
7959 {
7960         int ret = 0;
7961         struct regulator *reg;
7962         const char *name;
7963         int min_uV, uA_load;
7964
7965         BUG_ON(!vreg);
7966
7967         reg = vreg->reg;
7968         name = vreg->name;
7969
7970         if (regulator_count_voltages(reg) > 0) {
7971                 uA_load = on ? vreg->max_uA : 0;
7972                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7973                 if (ret)
7974                         goto out;
7975
7976                 if (vreg->min_uV && vreg->max_uV) {
7977                         min_uV = on ? vreg->min_uV : 0;
7978                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7979                         if (ret)
7980                                 dev_err(dev,
7981                                         "%s: %s set voltage failed, err=%d\n",
7982                                         __func__, name, ret);
7983                 }
7984         }
7985 out:
7986         return ret;
7987 }
7988
7989 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7990 {
7991         int ret = 0;
7992
7993         if (!vreg || vreg->enabled)
7994                 goto out;
7995
7996         ret = ufshcd_config_vreg(dev, vreg, true);
7997         if (!ret)
7998                 ret = regulator_enable(vreg->reg);
7999
8000         if (!ret)
8001                 vreg->enabled = true;
8002         else
8003                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8004                                 __func__, vreg->name, ret);
8005 out:
8006         return ret;
8007 }
8008
8009 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8010 {
8011         int ret = 0;
8012
8013         if (!vreg || !vreg->enabled)
8014                 goto out;
8015
8016         ret = regulator_disable(vreg->reg);
8017
8018         if (!ret) {
8019                 /* ignore errors on applying disable config */
8020                 ufshcd_config_vreg(dev, vreg, false);
8021                 vreg->enabled = false;
8022         } else {
8023                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8024                                 __func__, vreg->name, ret);
8025         }
8026 out:
8027         return ret;
8028 }
8029
8030 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8031 {
8032         int ret = 0;
8033         struct device *dev = hba->dev;
8034         struct ufs_vreg_info *info = &hba->vreg_info;
8035
8036         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8037         if (ret)
8038                 goto out;
8039
8040         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8041         if (ret)
8042                 goto out;
8043
8044         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8045
8046 out:
8047         if (ret) {
8048                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8049                 ufshcd_toggle_vreg(dev, info->vccq, false);
8050                 ufshcd_toggle_vreg(dev, info->vcc, false);
8051         }
8052         return ret;
8053 }
8054
8055 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8056 {
8057         struct ufs_vreg_info *info = &hba->vreg_info;
8058
8059         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8060 }
8061
8062 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8063 {
8064         int ret = 0;
8065
8066         if (!vreg)
8067                 goto out;
8068
8069         vreg->reg = devm_regulator_get(dev, vreg->name);
8070         if (IS_ERR(vreg->reg)) {
8071                 ret = PTR_ERR(vreg->reg);
8072                 dev_err(dev, "%s: %s get failed, err=%d\n",
8073                                 __func__, vreg->name, ret);
8074         }
8075 out:
8076         return ret;
8077 }
8078
8079 static int ufshcd_init_vreg(struct ufs_hba *hba)
8080 {
8081         int ret = 0;
8082         struct device *dev = hba->dev;
8083         struct ufs_vreg_info *info = &hba->vreg_info;
8084
8085         ret = ufshcd_get_vreg(dev, info->vcc);
8086         if (ret)
8087                 goto out;
8088
8089         ret = ufshcd_get_vreg(dev, info->vccq);
8090         if (!ret)
8091                 ret = ufshcd_get_vreg(dev, info->vccq2);
8092 out:
8093         return ret;
8094 }
8095
8096 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8097 {
8098         struct ufs_vreg_info *info = &hba->vreg_info;
8099
8100         if (info)
8101                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8102
8103         return 0;
8104 }
8105
8106 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8107 {
8108         int ret = 0;
8109         struct ufs_clk_info *clki;
8110         struct list_head *head = &hba->clk_list_head;
8111         unsigned long flags;
8112         ktime_t start = ktime_get();
8113         bool clk_state_changed = false;
8114
8115         if (list_empty(head))
8116                 goto out;
8117
8118         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8119         if (ret)
8120                 return ret;
8121
8122         list_for_each_entry(clki, head, list) {
8123                 if (!IS_ERR_OR_NULL(clki->clk)) {
8124                         /*
8125                          * Don't disable clocks which are needed
8126                          * to keep the link active.
8127                          */
8128                         if (ufshcd_is_link_active(hba) &&
8129                             clki->keep_link_active)
8130                                 continue;
8131
8132                         clk_state_changed = on ^ clki->enabled;
8133                         if (on && !clki->enabled) {
8134                                 ret = clk_prepare_enable(clki->clk);
8135                                 if (ret) {
8136                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8137                                                 __func__, clki->name, ret);
8138                                         goto out;
8139                                 }
8140                         } else if (!on && clki->enabled) {
8141                                 clk_disable_unprepare(clki->clk);
8142                         }
8143                         clki->enabled = on;
8144                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8145                                         clki->name, on ? "en" : "dis");
8146                 }
8147         }
8148
8149         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8150         if (ret)
8151                 return ret;
8152
8153 out:
8154         if (ret) {
8155                 list_for_each_entry(clki, head, list) {
8156                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8157                                 clk_disable_unprepare(clki->clk);
8158                 }
8159         } else if (!ret && on) {
8160                 spin_lock_irqsave(hba->host->host_lock, flags);
8161                 hba->clk_gating.state = CLKS_ON;
8162                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8163                                         hba->clk_gating.state);
8164                 spin_unlock_irqrestore(hba->host->host_lock, flags);
8165         }
8166
8167         if (clk_state_changed)
8168                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8169                         (on ? "on" : "off"),
8170                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8171         return ret;
8172 }
8173
8174 static int ufshcd_init_clocks(struct ufs_hba *hba)
8175 {
8176         int ret = 0;
8177         struct ufs_clk_info *clki;
8178         struct device *dev = hba->dev;
8179         struct list_head *head = &hba->clk_list_head;
8180
8181         if (list_empty(head))
8182                 goto out;
8183
8184         list_for_each_entry(clki, head, list) {
8185                 if (!clki->name)
8186                         continue;
8187
8188                 clki->clk = devm_clk_get(dev, clki->name);
8189                 if (IS_ERR(clki->clk)) {
8190                         ret = PTR_ERR(clki->clk);
8191                         dev_err(dev, "%s: %s clk get failed, %d\n",
8192                                         __func__, clki->name, ret);
8193                         goto out;
8194                 }
8195
8196                 /*
8197                  * Parse device ref clk freq as per device tree "ref_clk".
8198                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8199                  * in ufshcd_alloc_host().
8200                  */
8201                 if (!strcmp(clki->name, "ref_clk"))
8202                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8203
8204                 if (clki->max_freq) {
8205                         ret = clk_set_rate(clki->clk, clki->max_freq);
8206                         if (ret) {
8207                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8208                                         __func__, clki->name,
8209                                         clki->max_freq, ret);
8210                                 goto out;
8211                         }
8212                         clki->curr_freq = clki->max_freq;
8213                 }
8214                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8215                                 clki->name, clk_get_rate(clki->clk));
8216         }
8217 out:
8218         return ret;
8219 }
8220
8221 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8222 {
8223         int err = 0;
8224
8225         if (!hba->vops)
8226                 goto out;
8227
8228         err = ufshcd_vops_init(hba);
8229         if (err)
8230                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8231                         __func__, ufshcd_get_var_name(hba), err);
8232 out:
8233         return err;
8234 }
8235
8236 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8237 {
8238         if (!hba->vops)
8239                 return;
8240
8241         ufshcd_vops_exit(hba);
8242 }
8243
8244 static int ufshcd_hba_init(struct ufs_hba *hba)
8245 {
8246         int err;
8247
8248         /*
8249          * Handle host controller power separately from the UFS device power
8250          * rails as it will help controlling the UFS host controller power
8251          * collapse easily which is different than UFS device power collapse.
8252          * Also, enable the host controller power before we go ahead with rest
8253          * of the initialization here.
8254          */
8255         err = ufshcd_init_hba_vreg(hba);
8256         if (err)
8257                 goto out;
8258
8259         err = ufshcd_setup_hba_vreg(hba, true);
8260         if (err)
8261                 goto out;
8262
8263         err = ufshcd_init_clocks(hba);
8264         if (err)
8265                 goto out_disable_hba_vreg;
8266
8267         err = ufshcd_setup_clocks(hba, true);
8268         if (err)
8269                 goto out_disable_hba_vreg;
8270
8271         err = ufshcd_init_vreg(hba);
8272         if (err)
8273                 goto out_disable_clks;
8274
8275         err = ufshcd_setup_vreg(hba, true);
8276         if (err)
8277                 goto out_disable_clks;
8278
8279         err = ufshcd_variant_hba_init(hba);
8280         if (err)
8281                 goto out_disable_vreg;
8282
8283         hba->is_powered = true;
8284         goto out;
8285
8286 out_disable_vreg:
8287         ufshcd_setup_vreg(hba, false);
8288 out_disable_clks:
8289         ufshcd_setup_clocks(hba, false);
8290 out_disable_hba_vreg:
8291         ufshcd_setup_hba_vreg(hba, false);
8292 out:
8293         return err;
8294 }
8295
8296 static void ufshcd_hba_exit(struct ufs_hba *hba)
8297 {
8298         if (hba->is_powered) {
8299                 ufshcd_variant_hba_exit(hba);
8300                 ufshcd_setup_vreg(hba, false);
8301                 ufshcd_suspend_clkscaling(hba);
8302                 if (ufshcd_is_clkscaling_supported(hba))
8303                         if (hba->devfreq)
8304                                 ufshcd_suspend_clkscaling(hba);
8305                 ufshcd_setup_clocks(hba, false);
8306                 ufshcd_setup_hba_vreg(hba, false);
8307                 hba->is_powered = false;
8308                 ufs_put_device_desc(hba);
8309         }
8310 }
8311
8312 static int
8313 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8314 {
8315         unsigned char cmd[6] = {REQUEST_SENSE,
8316                                 0,
8317                                 0,
8318                                 0,
8319                                 UFS_SENSE_SIZE,
8320                                 0};
8321         char *buffer;
8322         int ret;
8323
8324         buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
8325         if (!buffer) {
8326                 ret = -ENOMEM;
8327                 goto out;
8328         }
8329
8330         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
8331                         UFS_SENSE_SIZE, NULL, NULL,
8332                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
8333         if (ret)
8334                 pr_err("%s: failed with err %d\n", __func__, ret);
8335
8336         kfree(buffer);
8337 out:
8338         return ret;
8339 }
8340
8341 /**
8342  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8343  *                           power mode
8344  * @hba: per adapter instance
8345  * @pwr_mode: device power mode to set
8346  *
8347  * Returns 0 if requested power mode is set successfully
8348  * Returns non-zero if failed to set the requested power mode
8349  */
8350 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8351                                      enum ufs_dev_pwr_mode pwr_mode)
8352 {
8353         unsigned char cmd[6] = { START_STOP };
8354         struct scsi_sense_hdr sshdr;
8355         struct scsi_device *sdp;
8356         unsigned long flags;
8357         int ret;
8358
8359         spin_lock_irqsave(hba->host->host_lock, flags);
8360         sdp = hba->sdev_ufs_device;
8361         if (sdp) {
8362                 ret = scsi_device_get(sdp);
8363                 if (!ret && !scsi_device_online(sdp)) {
8364                         ret = -ENODEV;
8365                         scsi_device_put(sdp);
8366                 }
8367         } else {
8368                 ret = -ENODEV;
8369         }
8370         spin_unlock_irqrestore(hba->host->host_lock, flags);
8371
8372         if (ret)
8373                 return ret;
8374
8375         /*
8376          * If scsi commands fail, the scsi mid-layer schedules scsi error-
8377          * handling, which would wait for host to be resumed. Since we know
8378          * we are functional while we are here, skip host resume in error
8379          * handling context.
8380          */
8381         hba->host->eh_noresume = 1;
8382         if (hba->wlun_dev_clr_ua) {
8383                 ret = ufshcd_send_request_sense(hba, sdp);
8384                 if (ret)
8385                         goto out;
8386                 /* Unit attention condition is cleared now */
8387                 hba->wlun_dev_clr_ua = false;
8388         }
8389
8390         cmd[4] = pwr_mode << 4;
8391
8392         /*
8393          * Current function would be generally called from the power management
8394          * callbacks hence set the RQF_PM flag so that it doesn't resume the
8395          * already suspended childs.
8396          */
8397         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8398                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8399         if (ret) {
8400                 sdev_printk(KERN_WARNING, sdp,
8401                             "START_STOP failed for power mode: %d, result %x\n",
8402                             pwr_mode, ret);
8403                 if (driver_byte(ret) == DRIVER_SENSE)
8404                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
8405         }
8406
8407         if (!ret)
8408                 hba->curr_dev_pwr_mode = pwr_mode;
8409 out:
8410         scsi_device_put(sdp);
8411         hba->host->eh_noresume = 0;
8412         return ret;
8413 }
8414
8415 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8416                                         enum uic_link_state req_link_state,
8417                                         int check_for_bkops)
8418 {
8419         int ret = 0;
8420
8421         if (req_link_state == hba->uic_link_state)
8422                 return 0;
8423
8424         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8425                 ret = ufshcd_uic_hibern8_enter(hba);
8426                 if (!ret) {
8427                         ufshcd_set_link_hibern8(hba);
8428                 } else {
8429                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8430                                         __func__, ret);
8431                         goto out;
8432                 }
8433         }
8434         /*
8435          * If autobkops is enabled, link can't be turned off because
8436          * turning off the link would also turn off the device, except in the
8437          * case of DeepSleep where the device is expected to remain powered.
8438          */
8439         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8440                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
8441                 /*
8442                  * Let's make sure that link is in low power mode, we are doing
8443                  * this currently by putting the link in Hibern8. Otherway to
8444                  * put the link in low power mode is to send the DME end point
8445                  * to device and then send the DME reset command to local
8446                  * unipro. But putting the link in hibern8 is much faster.
8447                  *
8448                  * Note also that putting the link in Hibern8 is a requirement
8449                  * for entering DeepSleep.
8450                  */
8451                 ret = ufshcd_uic_hibern8_enter(hba);
8452                 if (ret) {
8453                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8454                                         __func__, ret);
8455                         goto out;
8456                 }
8457                 /*
8458                  * Change controller state to "reset state" which
8459                  * should also put the link in off/reset state
8460                  */
8461                 ufshcd_hba_stop(hba);
8462                 /*
8463                  * TODO: Check if we need any delay to make sure that
8464                  * controller is reset
8465                  */
8466                 ufshcd_set_link_off(hba);
8467         }
8468
8469 out:
8470         return ret;
8471 }
8472
8473 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8474 {
8475         bool vcc_off = false;
8476
8477         /*
8478          * It seems some UFS devices may keep drawing more than sleep current
8479          * (atleast for 500us) from UFS rails (especially from VCCQ rail).
8480          * To avoid this situation, add 2ms delay before putting these UFS
8481          * rails in LPM mode.
8482          */
8483         if (!ufshcd_is_link_active(hba) &&
8484             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8485                 usleep_range(2000, 2100);
8486
8487         /*
8488          * If UFS device is either in UFS_Sleep turn off VCC rail to save some
8489          * power.
8490          *
8491          * If UFS device and link is in OFF state, all power supplies (VCC,
8492          * VCCQ, VCCQ2) can be turned off if power on write protect is not
8493          * required. If UFS link is inactive (Hibern8 or OFF state) and device
8494          * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8495          *
8496          * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8497          * in low power state which would save some power.
8498          *
8499          * If Write Booster is enabled and the device needs to flush the WB
8500          * buffer OR if bkops status is urgent for WB, keep Vcc on.
8501          */
8502         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8503             !hba->dev_info.is_lu_power_on_wp) {
8504                 ufshcd_setup_vreg(hba, false);
8505                 vcc_off = true;
8506         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8507                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8508                 vcc_off = true;
8509                 if (!ufshcd_is_link_active(hba)) {
8510                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8511                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8512                 }
8513         }
8514
8515         /*
8516          * Some UFS devices require delay after VCC power rail is turned-off.
8517          */
8518         if (vcc_off && hba->vreg_info.vcc &&
8519                 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8520                 usleep_range(5000, 5100);
8521 }
8522
8523 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8524 {
8525         int ret = 0;
8526
8527         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8528             !hba->dev_info.is_lu_power_on_wp) {
8529                 ret = ufshcd_setup_vreg(hba, true);
8530         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8531                 if (!ret && !ufshcd_is_link_active(hba)) {
8532                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8533                         if (ret)
8534                                 goto vcc_disable;
8535                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8536                         if (ret)
8537                                 goto vccq_lpm;
8538                 }
8539                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8540         }
8541         goto out;
8542
8543 vccq_lpm:
8544         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8545 vcc_disable:
8546         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8547 out:
8548         return ret;
8549 }
8550
8551 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8552 {
8553         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8554                 ufshcd_setup_hba_vreg(hba, false);
8555 }
8556
8557 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8558 {
8559         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8560                 ufshcd_setup_hba_vreg(hba, true);
8561 }
8562
8563 /**
8564  * ufshcd_suspend - helper function for suspend operations
8565  * @hba: per adapter instance
8566  * @pm_op: desired low power operation type
8567  *
8568  * This function will try to put the UFS device and link into low power
8569  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8570  * (System PM level).
8571  *
8572  * If this function is called during shutdown, it will make sure that
8573  * both UFS device and UFS link is powered off.
8574  *
8575  * NOTE: UFS device & link must be active before we enter in this function.
8576  *
8577  * Returns 0 for success and non-zero for failure
8578  */
8579 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8580 {
8581         int ret = 0;
8582         int check_for_bkops;
8583         enum ufs_pm_level pm_lvl;
8584         enum ufs_dev_pwr_mode req_dev_pwr_mode;
8585         enum uic_link_state req_link_state;
8586
8587         hba->pm_op_in_progress = 1;
8588         if (!ufshcd_is_shutdown_pm(pm_op)) {
8589                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8590                          hba->rpm_lvl : hba->spm_lvl;
8591                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8592                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8593         } else {
8594                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8595                 req_link_state = UIC_LINK_OFF_STATE;
8596         }
8597
8598         /*
8599          * If we can't transition into any of the low power modes
8600          * just gate the clocks.
8601          */
8602         ufshcd_hold(hba, false);
8603         hba->clk_gating.is_suspended = true;
8604
8605         if (hba->clk_scaling.is_allowed) {
8606                 cancel_work_sync(&hba->clk_scaling.suspend_work);
8607                 cancel_work_sync(&hba->clk_scaling.resume_work);
8608                 ufshcd_suspend_clkscaling(hba);
8609         }
8610
8611         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8612                         req_link_state == UIC_LINK_ACTIVE_STATE) {
8613                 goto disable_clks;
8614         }
8615
8616         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8617             (req_link_state == hba->uic_link_state))
8618                 goto enable_gating;
8619
8620         /* UFS device & link must be active before we enter in this function */
8621         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8622                 ret = -EINVAL;
8623                 goto enable_gating;
8624         }
8625
8626         if (ufshcd_is_runtime_pm(pm_op)) {
8627                 if (ufshcd_can_autobkops_during_suspend(hba)) {
8628                         /*
8629                          * The device is idle with no requests in the queue,
8630                          * allow background operations if bkops status shows
8631                          * that performance might be impacted.
8632                          */
8633                         ret = ufshcd_urgent_bkops(hba);
8634                         if (ret)
8635                                 goto enable_gating;
8636                 } else {
8637                         /* make sure that auto bkops is disabled */
8638                         ufshcd_disable_auto_bkops(hba);
8639                 }
8640                 /*
8641                  * If device needs to do BKOP or WB buffer flush during
8642                  * Hibern8, keep device power mode as "active power mode"
8643                  * and VCC supply.
8644                  */
8645                 hba->dev_info.b_rpm_dev_flush_capable =
8646                         hba->auto_bkops_enabled ||
8647                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8648                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8649                         ufshcd_is_auto_hibern8_enabled(hba))) &&
8650                         ufshcd_wb_need_flush(hba));
8651         }
8652
8653         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8654                 if (!ufshcd_is_runtime_pm(pm_op))
8655                         /* ensure that bkops is disabled */
8656                         ufshcd_disable_auto_bkops(hba);
8657
8658                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8659                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8660                         if (ret)
8661                                 goto enable_gating;
8662                 }
8663         }
8664
8665         flush_work(&hba->eeh_work);
8666
8667         /*
8668          * In the case of DeepSleep, the device is expected to remain powered
8669          * with the link off, so do not check for bkops.
8670          */
8671         check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
8672         ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
8673         if (ret)
8674                 goto set_dev_active;
8675
8676         ufshcd_vreg_set_lpm(hba);
8677
8678 disable_clks:
8679         /*
8680          * Call vendor specific suspend callback. As these callbacks may access
8681          * vendor specific host controller register space call them before the
8682          * host clocks are ON.
8683          */
8684         ret = ufshcd_vops_suspend(hba, pm_op);
8685         if (ret)
8686                 goto set_link_active;
8687         /*
8688          * Disable the host irq as host controller as there won't be any
8689          * host controller transaction expected till resume.
8690          */
8691         ufshcd_disable_irq(hba);
8692
8693         ufshcd_setup_clocks(hba, false);
8694
8695         if (ufshcd_is_clkgating_allowed(hba)) {
8696                 hba->clk_gating.state = CLKS_OFF;
8697                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8698                                         hba->clk_gating.state);
8699         }
8700
8701         /* Put the host controller in low power mode if possible */
8702         ufshcd_hba_vreg_set_lpm(hba);
8703         goto out;
8704
8705 set_link_active:
8706         if (hba->clk_scaling.is_allowed)
8707                 ufshcd_resume_clkscaling(hba);
8708         ufshcd_vreg_set_hpm(hba);
8709         /*
8710          * Device hardware reset is required to exit DeepSleep. Also, for
8711          * DeepSleep, the link is off so host reset and restore will be done
8712          * further below.
8713          */
8714         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8715                 ufshcd_vops_device_reset(hba);
8716                 WARN_ON(!ufshcd_is_link_off(hba));
8717         }
8718         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8719                 ufshcd_set_link_active(hba);
8720         else if (ufshcd_is_link_off(hba))
8721                 ufshcd_host_reset_and_restore(hba);
8722 set_dev_active:
8723         /* Can also get here needing to exit DeepSleep */
8724         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8725                 ufshcd_vops_device_reset(hba);
8726                 ufshcd_host_reset_and_restore(hba);
8727         }
8728         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8729                 ufshcd_disable_auto_bkops(hba);
8730 enable_gating:
8731         if (hba->clk_scaling.is_allowed)
8732                 ufshcd_resume_clkscaling(hba);
8733         hba->clk_gating.is_suspended = false;
8734         hba->dev_info.b_rpm_dev_flush_capable = false;
8735         ufshcd_release(hba);
8736 out:
8737         if (hba->dev_info.b_rpm_dev_flush_capable) {
8738                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8739                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8740         }
8741
8742         hba->pm_op_in_progress = 0;
8743
8744         if (ret)
8745                 ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
8746         return ret;
8747 }
8748
8749 /**
8750  * ufshcd_resume - helper function for resume operations
8751  * @hba: per adapter instance
8752  * @pm_op: runtime PM or system PM
8753  *
8754  * This function basically brings the UFS device, UniPro link and controller
8755  * to active state.
8756  *
8757  * Returns 0 for success and non-zero for failure
8758  */
8759 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8760 {
8761         int ret;
8762         enum uic_link_state old_link_state;
8763
8764         hba->pm_op_in_progress = 1;
8765         old_link_state = hba->uic_link_state;
8766
8767         ufshcd_hba_vreg_set_hpm(hba);
8768         /* Make sure clocks are enabled before accessing controller */
8769         ret = ufshcd_setup_clocks(hba, true);
8770         if (ret)
8771                 goto out;
8772
8773         /* enable the host irq as host controller would be active soon */
8774         ufshcd_enable_irq(hba);
8775
8776         ret = ufshcd_vreg_set_hpm(hba);
8777         if (ret)
8778                 goto disable_irq_and_vops_clks;
8779
8780         /*
8781          * Call vendor specific resume callback. As these callbacks may access
8782          * vendor specific host controller register space call them when the
8783          * host clocks are ON.
8784          */
8785         ret = ufshcd_vops_resume(hba, pm_op);
8786         if (ret)
8787                 goto disable_vreg;
8788
8789         /* For DeepSleep, the only supported option is to have the link off */
8790         WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
8791
8792         if (ufshcd_is_link_hibern8(hba)) {
8793                 ret = ufshcd_uic_hibern8_exit(hba);
8794                 if (!ret) {
8795                         ufshcd_set_link_active(hba);
8796                 } else {
8797                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8798                                         __func__, ret);
8799                         goto vendor_suspend;
8800                 }
8801         } else if (ufshcd_is_link_off(hba)) {
8802                 /*
8803                  * A full initialization of the host and the device is
8804                  * required since the link was put to off during suspend.
8805                  * Note, in the case of DeepSleep, the device will exit
8806                  * DeepSleep due to device reset.
8807                  */
8808                 ret = ufshcd_reset_and_restore(hba);
8809                 /*
8810                  * ufshcd_reset_and_restore() should have already
8811                  * set the link state as active
8812                  */
8813                 if (ret || !ufshcd_is_link_active(hba))
8814                         goto vendor_suspend;
8815         }
8816
8817         if (!ufshcd_is_ufs_dev_active(hba)) {
8818                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8819                 if (ret)
8820                         goto set_old_link_state;
8821         }
8822
8823         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8824                 ufshcd_enable_auto_bkops(hba);
8825         else
8826                 /*
8827                  * If BKOPs operations are urgently needed at this moment then
8828                  * keep auto-bkops enabled or else disable it.
8829                  */
8830                 ufshcd_urgent_bkops(hba);
8831
8832         hba->clk_gating.is_suspended = false;
8833
8834         if (hba->clk_scaling.is_allowed)
8835                 ufshcd_resume_clkscaling(hba);
8836
8837         /* Enable Auto-Hibernate if configured */
8838         ufshcd_auto_hibern8_enable(hba);
8839
8840         if (hba->dev_info.b_rpm_dev_flush_capable) {
8841                 hba->dev_info.b_rpm_dev_flush_capable = false;
8842                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8843         }
8844
8845         /* Schedule clock gating in case of no access to UFS device yet */
8846         ufshcd_release(hba);
8847
8848         goto out;
8849
8850 set_old_link_state:
8851         ufshcd_link_state_transition(hba, old_link_state, 0);
8852 vendor_suspend:
8853         ufshcd_vops_suspend(hba, pm_op);
8854 disable_vreg:
8855         ufshcd_vreg_set_lpm(hba);
8856 disable_irq_and_vops_clks:
8857         ufshcd_disable_irq(hba);
8858         if (hba->clk_scaling.is_allowed)
8859                 ufshcd_suspend_clkscaling(hba);
8860         ufshcd_setup_clocks(hba, false);
8861         if (ufshcd_is_clkgating_allowed(hba)) {
8862                 hba->clk_gating.state = CLKS_OFF;
8863                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8864                                         hba->clk_gating.state);
8865         }
8866 out:
8867         hba->pm_op_in_progress = 0;
8868         if (ret)
8869                 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
8870         return ret;
8871 }
8872
8873 /**
8874  * ufshcd_system_suspend - system suspend routine
8875  * @hba: per adapter instance
8876  *
8877  * Check the description of ufshcd_suspend() function for more details.
8878  *
8879  * Returns 0 for success and non-zero for failure
8880  */
8881 int ufshcd_system_suspend(struct ufs_hba *hba)
8882 {
8883         int ret = 0;
8884         ktime_t start = ktime_get();
8885
8886         down(&hba->eh_sem);
8887         if (!hba || !hba->is_powered)
8888                 return 0;
8889
8890         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8891              hba->curr_dev_pwr_mode) &&
8892             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8893              hba->uic_link_state))
8894                 goto out;
8895
8896         if (pm_runtime_suspended(hba->dev)) {
8897                 /*
8898                  * UFS device and/or UFS link low power states during runtime
8899                  * suspend seems to be different than what is expected during
8900                  * system suspend. Hence runtime resume the devic & link and
8901                  * let the system suspend low power states to take effect.
8902                  * TODO: If resume takes longer time, we might have optimize
8903                  * it in future by not resuming everything if possible.
8904                  */
8905                 ret = ufshcd_runtime_resume(hba);
8906                 if (ret)
8907                         goto out;
8908         }
8909
8910         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8911 out:
8912         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8913                 ktime_to_us(ktime_sub(ktime_get(), start)),
8914                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8915         if (!ret)
8916                 hba->is_sys_suspended = true;
8917         else
8918                 up(&hba->eh_sem);
8919         return ret;
8920 }
8921 EXPORT_SYMBOL(ufshcd_system_suspend);
8922
8923 /**
8924  * ufshcd_system_resume - system resume routine
8925  * @hba: per adapter instance
8926  *
8927  * Returns 0 for success and non-zero for failure
8928  */
8929
8930 int ufshcd_system_resume(struct ufs_hba *hba)
8931 {
8932         int ret = 0;
8933         ktime_t start = ktime_get();
8934
8935         if (!hba) {
8936                 up(&hba->eh_sem);
8937                 return -EINVAL;
8938         }
8939
8940         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8941                 /*
8942                  * Let the runtime resume take care of resuming
8943                  * if runtime suspended.
8944                  */
8945                 goto out;
8946         else
8947                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8948 out:
8949         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8950                 ktime_to_us(ktime_sub(ktime_get(), start)),
8951                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8952         if (!ret)
8953                 hba->is_sys_suspended = false;
8954         up(&hba->eh_sem);
8955         return ret;
8956 }
8957 EXPORT_SYMBOL(ufshcd_system_resume);
8958
8959 /**
8960  * ufshcd_runtime_suspend - runtime suspend routine
8961  * @hba: per adapter instance
8962  *
8963  * Check the description of ufshcd_suspend() function for more details.
8964  *
8965  * Returns 0 for success and non-zero for failure
8966  */
8967 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8968 {
8969         int ret = 0;
8970         ktime_t start = ktime_get();
8971
8972         if (!hba)
8973                 return -EINVAL;
8974
8975         if (!hba->is_powered)
8976                 goto out;
8977         else
8978                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8979 out:
8980         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8981                 ktime_to_us(ktime_sub(ktime_get(), start)),
8982                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8983         return ret;
8984 }
8985 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8986
8987 /**
8988  * ufshcd_runtime_resume - runtime resume routine
8989  * @hba: per adapter instance
8990  *
8991  * This function basically brings the UFS device, UniPro link and controller
8992  * to active state. Following operations are done in this function:
8993  *
8994  * 1. Turn on all the controller related clocks
8995  * 2. Bring the UniPro link out of Hibernate state
8996  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8997  *    to active state.
8998  * 4. If auto-bkops is enabled on the device, disable it.
8999  *
9000  * So following would be the possible power state after this function return
9001  * successfully:
9002  *      S1: UFS device in Active state with VCC rail ON
9003  *          UniPro link in Active state
9004  *          All the UFS/UniPro controller clocks are ON
9005  *
9006  * Returns 0 for success and non-zero for failure
9007  */
9008 int ufshcd_runtime_resume(struct ufs_hba *hba)
9009 {
9010         int ret = 0;
9011         ktime_t start = ktime_get();
9012
9013         if (!hba)
9014                 return -EINVAL;
9015
9016         if (!hba->is_powered)
9017                 goto out;
9018         else
9019                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9020 out:
9021         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9022                 ktime_to_us(ktime_sub(ktime_get(), start)),
9023                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9024         return ret;
9025 }
9026 EXPORT_SYMBOL(ufshcd_runtime_resume);
9027
9028 int ufshcd_runtime_idle(struct ufs_hba *hba)
9029 {
9030         return 0;
9031 }
9032 EXPORT_SYMBOL(ufshcd_runtime_idle);
9033
9034 /**
9035  * ufshcd_shutdown - shutdown routine
9036  * @hba: per adapter instance
9037  *
9038  * This function would power off both UFS device and UFS link.
9039  *
9040  * Returns 0 always to allow force shutdown even in case of errors.
9041  */
9042 int ufshcd_shutdown(struct ufs_hba *hba)
9043 {
9044         int ret = 0;
9045
9046         down(&hba->eh_sem);
9047         if (!hba->is_powered)
9048                 goto out;
9049
9050         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9051                 goto out;
9052
9053         if (pm_runtime_suspended(hba->dev)) {
9054                 ret = ufshcd_runtime_resume(hba);
9055                 if (ret)
9056                         goto out;
9057         }
9058
9059         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9060 out:
9061         if (ret)
9062                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9063         hba->is_powered = false;
9064         up(&hba->eh_sem);
9065         /* allow force shutdown even in case of errors */
9066         return 0;
9067 }
9068 EXPORT_SYMBOL(ufshcd_shutdown);
9069
9070 /**
9071  * ufshcd_remove - de-allocate SCSI host and host memory space
9072  *              data structure memory
9073  * @hba: per adapter instance
9074  */
9075 void ufshcd_remove(struct ufs_hba *hba)
9076 {
9077         ufs_bsg_remove(hba);
9078         ufs_sysfs_remove_nodes(hba->dev);
9079         blk_cleanup_queue(hba->tmf_queue);
9080         blk_mq_free_tag_set(&hba->tmf_tag_set);
9081         blk_cleanup_queue(hba->cmd_queue);
9082         scsi_remove_host(hba->host);
9083         /* disable interrupts */
9084         ufshcd_disable_intr(hba, hba->intr_mask);
9085         ufshcd_hba_stop(hba);
9086
9087         ufshcd_exit_clk_scaling(hba);
9088         ufshcd_exit_clk_gating(hba);
9089         if (ufshcd_is_clkscaling_supported(hba))
9090                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9091         ufshcd_hba_exit(hba);
9092 }
9093 EXPORT_SYMBOL_GPL(ufshcd_remove);
9094
9095 /**
9096  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9097  * @hba: pointer to Host Bus Adapter (HBA)
9098  */
9099 void ufshcd_dealloc_host(struct ufs_hba *hba)
9100 {
9101         ufshcd_crypto_destroy_keyslot_manager(hba);
9102         scsi_host_put(hba->host);
9103 }
9104 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9105
9106 /**
9107  * ufshcd_set_dma_mask - Set dma mask based on the controller
9108  *                       addressing capability
9109  * @hba: per adapter instance
9110  *
9111  * Returns 0 for success, non-zero for failure
9112  */
9113 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9114 {
9115         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9116                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9117                         return 0;
9118         }
9119         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9120 }
9121
9122 /**
9123  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9124  * @dev: pointer to device handle
9125  * @hba_handle: driver private handle
9126  * Returns 0 on success, non-zero value on failure
9127  */
9128 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9129 {
9130         struct Scsi_Host *host;
9131         struct ufs_hba *hba;
9132         int err = 0;
9133
9134         if (!dev) {
9135                 dev_err(dev,
9136                 "Invalid memory reference for dev is NULL\n");
9137                 err = -ENODEV;
9138                 goto out_error;
9139         }
9140
9141         host = scsi_host_alloc(&ufshcd_driver_template,
9142                                 sizeof(struct ufs_hba));
9143         if (!host) {
9144                 dev_err(dev, "scsi_host_alloc failed\n");
9145                 err = -ENOMEM;
9146                 goto out_error;
9147         }
9148         hba = shost_priv(host);
9149         hba->host = host;
9150         hba->dev = dev;
9151         *hba_handle = hba;
9152         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9153
9154         INIT_LIST_HEAD(&hba->clk_list_head);
9155
9156 out_error:
9157         return err;
9158 }
9159 EXPORT_SYMBOL(ufshcd_alloc_host);
9160
9161 /* This function exists because blk_mq_alloc_tag_set() requires this. */
9162 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9163                                      const struct blk_mq_queue_data *qd)
9164 {
9165         WARN_ON_ONCE(true);
9166         return BLK_STS_NOTSUPP;
9167 }
9168
9169 static const struct blk_mq_ops ufshcd_tmf_ops = {
9170         .queue_rq = ufshcd_queue_tmf,
9171 };
9172
9173 /**
9174  * ufshcd_init - Driver initialization routine
9175  * @hba: per-adapter instance
9176  * @mmio_base: base register address
9177  * @irq: Interrupt line of device
9178  * Returns 0 on success, non-zero value on failure
9179  */
9180 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9181 {
9182         int err;
9183         struct Scsi_Host *host = hba->host;
9184         struct device *dev = hba->dev;
9185         char eh_wq_name[sizeof("ufs_eh_wq_00")];
9186
9187         if (!mmio_base) {
9188                 dev_err(hba->dev,
9189                 "Invalid memory reference for mmio_base is NULL\n");
9190                 err = -ENODEV;
9191                 goto out_error;
9192         }
9193
9194         hba->mmio_base = mmio_base;
9195         hba->irq = irq;
9196         hba->vps = &ufs_hba_vps;
9197
9198         err = ufshcd_hba_init(hba);
9199         if (err)
9200                 goto out_error;
9201
9202         /* Read capabilities registers */
9203         err = ufshcd_hba_capabilities(hba);
9204         if (err)
9205                 goto out_disable;
9206
9207         /* Get UFS version supported by the controller */
9208         hba->ufs_version = ufshcd_get_ufs_version(hba);
9209
9210         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
9211             (hba->ufs_version != UFSHCI_VERSION_11) &&
9212             (hba->ufs_version != UFSHCI_VERSION_20) &&
9213             (hba->ufs_version != UFSHCI_VERSION_21))
9214                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
9215                         hba->ufs_version);
9216
9217         /* Get Interrupt bit mask per version */
9218         hba->intr_mask = ufshcd_get_intr_mask(hba);
9219
9220         err = ufshcd_set_dma_mask(hba);
9221         if (err) {
9222                 dev_err(hba->dev, "set dma mask failed\n");
9223                 goto out_disable;
9224         }
9225
9226         /* Allocate memory for host memory space */
9227         err = ufshcd_memory_alloc(hba);
9228         if (err) {
9229                 dev_err(hba->dev, "Memory allocation failed\n");
9230                 goto out_disable;
9231         }
9232
9233         /* Configure LRB */
9234         ufshcd_host_memory_configure(hba);
9235
9236         host->can_queue = hba->nutrs;
9237         host->cmd_per_lun = hba->nutrs;
9238         host->max_id = UFSHCD_MAX_ID;
9239         host->max_lun = UFS_MAX_LUNS;
9240         host->max_channel = UFSHCD_MAX_CHANNEL;
9241         host->unique_id = host->host_no;
9242         host->max_cmd_len = UFS_CDB_SIZE;
9243
9244         hba->max_pwr_info.is_valid = false;
9245
9246         /* Initialize work queues */
9247         snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9248                  hba->host->host_no);
9249         hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9250         if (!hba->eh_wq) {
9251                 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9252                                 __func__);
9253                 err = -ENOMEM;
9254                 goto out_disable;
9255         }
9256         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9257         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9258
9259         sema_init(&hba->eh_sem, 1);
9260
9261         /* Initialize UIC command mutex */
9262         mutex_init(&hba->uic_cmd_mutex);
9263
9264         /* Initialize mutex for device management commands */
9265         mutex_init(&hba->dev_cmd.lock);
9266
9267         init_rwsem(&hba->clk_scaling_lock);
9268
9269         ufshcd_init_clk_gating(hba);
9270
9271         ufshcd_init_clk_scaling(hba);
9272
9273         /*
9274          * In order to avoid any spurious interrupt immediately after
9275          * registering UFS controller interrupt handler, clear any pending UFS
9276          * interrupt status and disable all the UFS interrupts.
9277          */
9278         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9279                       REG_INTERRUPT_STATUS);
9280         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9281         /*
9282          * Make sure that UFS interrupts are disabled and any pending interrupt
9283          * status is cleared before registering UFS interrupt handler.
9284          */
9285         mb();
9286
9287         /* IRQ registration */
9288         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
9289         if (err) {
9290                 dev_err(hba->dev, "request irq failed\n");
9291                 goto exit_gating;
9292         } else {
9293                 hba->is_irq_enabled = true;
9294         }
9295
9296         err = scsi_add_host(host, hba->dev);
9297         if (err) {
9298                 dev_err(hba->dev, "scsi_add_host failed\n");
9299                 goto exit_gating;
9300         }
9301
9302         hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9303         if (IS_ERR(hba->cmd_queue)) {
9304                 err = PTR_ERR(hba->cmd_queue);
9305                 goto out_remove_scsi_host;
9306         }
9307
9308         hba->tmf_tag_set = (struct blk_mq_tag_set) {
9309                 .nr_hw_queues   = 1,
9310                 .queue_depth    = hba->nutmrs,
9311                 .ops            = &ufshcd_tmf_ops,
9312                 .flags          = BLK_MQ_F_NO_SCHED,
9313         };
9314         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9315         if (err < 0)
9316                 goto free_cmd_queue;
9317         hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9318         if (IS_ERR(hba->tmf_queue)) {
9319                 err = PTR_ERR(hba->tmf_queue);
9320                 goto free_tmf_tag_set;
9321         }
9322
9323         /* Reset the attached device */
9324         ufshcd_vops_device_reset(hba);
9325
9326         ufshcd_init_crypto(hba);
9327
9328         /* Host controller enable */
9329         err = ufshcd_hba_enable(hba);
9330         if (err) {
9331                 dev_err(hba->dev, "Host controller enable failed\n");
9332                 ufshcd_print_evt_hist(hba);
9333                 ufshcd_print_host_state(hba);
9334                 goto free_tmf_queue;
9335         }
9336
9337         /*
9338          * Set the default power management level for runtime and system PM.
9339          * Default power saving mode is to keep UFS link in Hibern8 state
9340          * and UFS device in sleep state.
9341          */
9342         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9343                                                 UFS_SLEEP_PWR_MODE,
9344                                                 UIC_LINK_HIBERN8_STATE);
9345         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9346                                                 UFS_SLEEP_PWR_MODE,
9347                                                 UIC_LINK_HIBERN8_STATE);
9348
9349         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9350                           ufshcd_rpm_dev_flush_recheck_work);
9351
9352         /* Set the default auto-hiberate idle timer value to 150 ms */
9353         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9354                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9355                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9356         }
9357
9358         /* Hold auto suspend until async scan completes */
9359         pm_runtime_get_sync(dev);
9360         atomic_set(&hba->scsi_block_reqs_cnt, 0);
9361         /*
9362          * We are assuming that device wasn't put in sleep/power-down
9363          * state exclusively during the boot stage before kernel.
9364          * This assumption helps avoid doing link startup twice during
9365          * ufshcd_probe_hba().
9366          */
9367         ufshcd_set_ufs_dev_active(hba);
9368
9369         async_schedule(ufshcd_async_scan, hba);
9370         ufs_sysfs_add_nodes(hba->dev);
9371
9372         return 0;
9373
9374 free_tmf_queue:
9375         blk_cleanup_queue(hba->tmf_queue);
9376 free_tmf_tag_set:
9377         blk_mq_free_tag_set(&hba->tmf_tag_set);
9378 free_cmd_queue:
9379         blk_cleanup_queue(hba->cmd_queue);
9380 out_remove_scsi_host:
9381         scsi_remove_host(hba->host);
9382 exit_gating:
9383         ufshcd_exit_clk_scaling(hba);
9384         ufshcd_exit_clk_gating(hba);
9385 out_disable:
9386         hba->is_irq_enabled = false;
9387         ufshcd_hba_exit(hba);
9388 out_error:
9389         return err;
9390 }
9391 EXPORT_SYMBOL_GPL(ufshcd_init);
9392
9393 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
9394 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
9395 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
9396 MODULE_LICENSE("GPL");
9397 MODULE_VERSION(UFSHCD_DRIVER_VERSION);