// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include "ufshpb.h"
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
                                 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT  100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)

#define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
        ({                                                              \
                int _ret;                                               \
                if (_on)                                                \
                        _ret = ufshcd_enable_vreg(_dev, _vreg);         \
                else                                                    \
                        _ret = ufshcd_disable_vreg(_dev, _vreg);        \
                _ret;                                                   \
        })

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
        size_t __len = (len);                                            \
        print_hex_dump(KERN_ERR, prefix_str,                             \
                       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
                       16, 4, buf, __len, false);                        \
} while (0)

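/**
 * ufshcd_dump_regs - read a contiguous range of host registers and
 *                    hex-dump it to the kernel log
 * @hba: per-adapter instance
 * @offset: byte offset of the first register to read, must be 4-byte aligned
 * @len: number of bytes to dump, must be a multiple of 4
 * @prefix: prefix string prepended to the hex dump
 *
 * Returns 0 on success, -EINVAL for unaligned arguments and -ENOMEM if the
 * temporary buffer cannot be allocated.
 */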
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                     const char *prefix)
{
        u32 *regs;
        size_t pos;

        if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
                return -EINVAL;

        regs = kzalloc(len, GFP_ATOMIC);
        if (!regs)
                return -ENOMEM;

        for (pos = 0; pos < len; pos += 4)
                regs[pos / 4] = ufshcd_readl(hba, offset + pos);

        ufshcd_hex_dump(prefix, regs, len);
        kfree(regs);

        return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
        UFSHCD_CMD_PER_LUN      = 32,
        UFSHCD_CAN_QUEUE        = 32,
};

static const char *const ufshcd_state_name[] = {
        [UFSHCD_STATE_RESET]                    = "reset",
        [UFSHCD_STATE_OPERATIONAL]              = "operational",
        [UFSHCD_STATE_ERROR]                    = "error",
        [UFSHCD_STATE_EH_SCHEDULED_FATAL]       = "eh_fatal",
        [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]   = "eh_non_fatal",
};

/* UFSHCD error handling flags */
enum {
        UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
        UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
        UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
        UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
        UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
        UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
        UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};

#define ufshcd_set_eh_in_progress(h) \
        ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
        ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
        ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
        [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
        /*
         * For DeepSleep, the link is first put in hibern8 and then off.
         * Leaving the link in hibern8 is not supported.
         */
        [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
                                        enum uic_link_state link_state)
{
        enum ufs_pm_level lvl;

        for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
                if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
                        (ufs_pm_lvl_states[lvl].link_state == link_state))
                        return lvl;
        }
        /* if no match is found, return level 0 */
        return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
        /* UFS cards deviations table */
        UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
                UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
                UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
                UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
        UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
                UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
        UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        END_FIX
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);

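/**
 * ufshcd_enable_irq - enable the host controller IRQ if it is not
 *                     already enabled
 * @hba: per adapter instance
 */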
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
        if (!hba->is_irq_enabled) {
                enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
        if (hba->is_irq_enabled) {
                disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
}

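/**
 * ufshcd_wb_config - enable WriteBooster and its flush handling on
 *                    devices that allow it
 * @hba: per adapter instance
 */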
static inline void ufshcd_wb_config(struct ufs_hba *hba)
{
        if (!ufshcd_is_wb_allowed(hba))
                return;

        ufshcd_wb_toggle(hba, true);

        ufshcd_wb_toggle_flush_during_h8(hba, true);
        if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
                ufshcd_wb_toggle_flush(hba, true);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
        if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
                scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
        if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
                scsi_block_requests(hba->host);
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                                      enum ufs_trace_str_t str_t)
{
        struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
        struct utp_upiu_header *header;

        if (!trace_ufshcd_upiu_enabled())
                return;

        if (str_t == UFS_CMD_SEND)
                header = &rq->header;
        else
                header = &hba->lrb[tag].ucd_rsp_ptr->header;

        trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
                          UFS_TSF_CDB);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
                                        enum ufs_trace_str_t str_t,
                                        struct utp_upiu_req *rq_rsp)
{
        if (!trace_ufshcd_upiu_enabled())
                return;

        trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
                          &rq_rsp->qr, UFS_TSF_OSF);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                                     enum ufs_trace_str_t str_t)
{
        struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

        if (!trace_ufshcd_upiu_enabled())
                return;

        if (str_t == UFS_TM_SEND)
                trace_ufshcd_upiu(dev_name(hba->dev), str_t,
                                  &descp->upiu_req.req_header,
                                  &descp->upiu_req.input_param1,
                                  UFS_TSF_TM_INPUT);
        else
                trace_ufshcd_upiu(dev_name(hba->dev), str_t,
                                  &descp->upiu_rsp.rsp_header,
                                  &descp->upiu_rsp.output_param1,
                                  UFS_TSF_TM_OUTPUT);
}

static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
                                         struct uic_command *ucmd,
                                         enum ufs_trace_str_t str_t)
{
        u32 cmd;

        if (!trace_ufshcd_uic_command_enabled())
                return;

        if (str_t == UFS_CMD_SEND)
                cmd = ucmd->command;
        else
                cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

        trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
                                 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
                                 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
                                 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}

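/**
 * ufshcd_add_command_trace - trace a SCSI command issued to or completed
 *                            by the device, together with its UPIU
 * @hba: per adapter instance
 * @tag: transfer request tag
 * @str_t: trace string type (send or complete)
 */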
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
                                     enum ufs_trace_str_t str_t)
{
        u64 lba;
        u8 opcode = 0, group_id = 0;
        u32 intr, doorbell;
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
        struct scsi_cmnd *cmd = lrbp->cmd;
        struct request *rq;
        int transfer_len = -1;

        if (!cmd)
                return;

        rq = scsi_cmd_to_rq(cmd);

        /* trace UPIU also */
        ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
        if (!trace_ufshcd_command_enabled())
                return;

        opcode = cmd->cmnd[0];
        lba = scsi_get_lba(cmd);

        if (opcode == READ_10 || opcode == WRITE_10) {
                /*
                 * Currently we only fully trace read(10) and write(10) commands
                 */
                transfer_len =
                       be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
                if (opcode == WRITE_10)
                        group_id = lrbp->cmd->cmnd[6];
        } else if (opcode == UNMAP) {
                /*
                 * The number of bytes to be unmapped, beginning at the LBA.
                 */
                transfer_len = blk_rq_bytes(rq);
        }

        intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
        doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
                        doorbell, transfer_len, intr, lba, opcode, group_id);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                return;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
                                clki->max_freq)
                        dev_err(hba->dev, "clk: %s, rate: %u\n",
                                        clki->name, clki->curr_freq);
        }
}

static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
                             char *err_name)
{
        int i;
        bool found = false;
        struct ufs_event_hist *e;

        if (id >= UFS_EVT_CNT)
                return;

        e = &hba->ufs_stats.event[id];

        for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
                int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

                if (e->tstamp[p] == 0)
                        continue;
                dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
                        e->val[p], ktime_to_us(e->tstamp[p]));
                found = true;
        }

        if (!found)
                dev_err(hba->dev, "No record of %s\n", err_name);
        else
                dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}

static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

        ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
        ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
        ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
        ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
        ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
        ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
                         "auto_hibern8_err");
        ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
        ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
                         "link_startup_fail");
        ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
        ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
                         "suspend_fail");
        ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
        ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
        ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

        ufshcd_vops_dbg_register_dump(hba);
}

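/**
 * ufshcd_print_trs - dump the transfer request descriptors, request and
 *                    response UPIUs, and optionally the PRDTs for the
 *                    tags set in @bitmap
 * @hba: per adapter instance
 * @bitmap: bitmap of transfer request tags to print
 * @pr_prdt: true to also dump the PRDT of each printed request
 */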
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
        struct ufshcd_lrb *lrbp;
        int prdt_length;
        int tag;

        for_each_set_bit(tag, &bitmap, hba->nutrs) {
                lrbp = &hba->lrb[tag];

                dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
                                tag, ktime_to_us(lrbp->issue_time_stamp));
                dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
                                tag, ktime_to_us(lrbp->compl_time_stamp));
                dev_err(hba->dev,
                        "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
                        tag, (u64)lrbp->utrd_dma_addr);

                ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
                                sizeof(struct utp_transfer_req_desc));
                dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
                        (u64)lrbp->ucd_req_dma_addr);
                ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
                                sizeof(struct utp_upiu_req));
                dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
                        (u64)lrbp->ucd_rsp_dma_addr);
                ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
                                sizeof(struct utp_upiu_rsp));

                prdt_length = le16_to_cpu(
                        lrbp->utr_descriptor_ptr->prd_table_length);
                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
                        prdt_length /= sizeof(struct ufshcd_sg_entry);

                dev_err(hba->dev,
                        "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
                        tag, prdt_length,
                        (u64)lrbp->ucd_prdt_dma_addr);

                if (pr_prdt)
                        ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
                                sizeof(struct ufshcd_sg_entry) * prdt_length);
        }
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
        int tag;

        for_each_set_bit(tag, &bitmap, hba->nutmrs) {
                struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

                dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
                ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
        }
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
        struct scsi_device *sdev_ufs = hba->sdev_ufs_device;

        dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
        dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
                hba->outstanding_reqs, hba->outstanding_tasks);
        dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
                hba->saved_err, hba->saved_uic_err);
        dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
                hba->pm_op_in_progress, hba->is_sys_suspended);
        dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
                hba->auto_bkops_enabled, hba->host->host_self_blocked);
        dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
        dev_err(hba->dev,
                "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
                ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
                hba->ufs_stats.hibern8_exit_cnt);
        dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
                ktime_to_us(hba->ufs_stats.last_intr_ts),
                hba->ufs_stats.last_intr_status);
        dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
                hba->eh_flags, hba->req_abort_count);
        dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
                hba->ufs_version, hba->capabilities, hba->caps);
        dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
                hba->dev_quirks);
        if (sdev_ufs)
                dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
                        sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

        ufshcd_print_clk_freqs(hba);
}

/**
 * ufshcd_print_pwr_info - print the power parameters saved in hba->pwr_info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
        static const char * const names[] = {
                "INVALID MODE",
                "FAST MODE",
                "SLOW_MODE",
                "INVALID MODE",
                "FASTAUTO_MODE",
                "SLOWAUTO_MODE",
                "INVALID MODE",
        };

        dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
                 __func__,
                 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
                 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
                 names[hba->pwr_info.pwr_rx],
                 names[hba->pwr_info.pwr_tx],
                 hba->pwr_info.hs_rate);
}

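/**
 * ufshcd_device_reset - reset the UFS device through the host controller
 *                       vendor ops and record the outcome in the event
 *                       history
 * @hba: per adapter instance
 */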
static void ufshcd_device_reset(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_vops_device_reset(hba);

        if (!err) {
                ufshcd_set_ufs_dev_active(hba);
                if (ufshcd_is_wb_allowed(hba)) {
                        hba->dev_info.wb_enabled = false;
                        hba->dev_info.wb_buf_flush_enabled = false;
                }
        }
        if (err != -EOPNOTSUPP)
                ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}

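/**
 * ufshcd_delay_us - busy-wait for short delays, sleep for longer ones
 * @us: time to wait in microseconds
 * @tolerance: extra slack, in microseconds, granted to usleep_range()
 */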
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
        if (!us)
                return;

        if (us < 10)
                udelay(us);
        else
                usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);

/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return:
 * -ETIMEDOUT on error, zero on success.
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                                u32 val, unsigned long interval_us,
                                unsigned long timeout_ms)
{
        int err = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        /* ignore bits that we don't intend to wait on */
        val = val & mask;

        while ((ufshcd_readl(hba, reg) & mask) != val) {
                usleep_range(interval_us, interval_us + 50);
                if (time_after(jiffies, timeout)) {
                        if ((ufshcd_readl(hba, reg) & mask) != val)
                                err = -ETIMEDOUT;
                        break;
                }
        }

        return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
        if (hba->ufs_version == ufshci_version(1, 0))
                return INTERRUPT_MASK_ALL_VER_10;
        if (hba->ufs_version <= ufshci_version(2, 0))
                return INTERRUPT_MASK_ALL_VER_11;

        return INTERRUPT_MASK_ALL_VER_21;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
        u32 ufshci_ver;

        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
                ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
        else
                ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

        /*
         * UFSHCI v1.x uses a different version scheme. To allow comparisons
         * with the ufshci_version() function, convert it to the same scheme
         * as UFSHCI 2.0+.
         */
        if (ufshci_ver & 0x00010000)
                return ufshci_version(1, ufshci_ver & 0x00000100);

        return ufshci_ver;
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *                            the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
                                                DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
        else
                ufshcd_writel(hba, ~(1 << pos),
                                REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
        else
                ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success and a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
        return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion.
 * Returns 0 on success, a non-zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
               MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument 3.
 * Returns the attribute value held in UIC command argument 3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *                              from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                        MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE |
                      INT_AGGR_COUNTER_AND_TIMER_RESET,
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
                      INT_AGGR_COUNTER_THLD_VAL(cnt) |
                      INT_AGGR_TIMEOUT_VAL(tmout),
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *                      Setting the run-stop registers to 1 indicates to the
 *                      host controller that it can process requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
        ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TASK_REQ_LIST_RUN_STOP);
        ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
        u32 val = CONTROLLER_ENABLE;

        if (ufshcd_crypto_enable(hba))
                val |= CRYPTO_GENERAL_ENABLE;

        ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
                ? false : true;
}

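/**
 * ufshcd_get_local_unipro_ver - get the UniPro version supported by the
 *                               local (host) side of the link
 * @hba: per adapter instance
 *
 * Returns the UniPro version derived from the UFSHCI version.
 */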
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
        /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
        if (hba->ufs_version <= ufshci_version(1, 1))
                return UFS_UNIPRO_VER_1_41;
        else
                return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
        /*
         * If both host and device support UniPro ver 1.6 or later, PA layer
         * parameters tuning happens during link startup itself.
         *
         * We can manually tune PA layer parameters if either host or device
         * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
         * logic simple, we will only do manual tuning if the local UniPro
         * version doesn't support ver 1.6 or later.
         */
        if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
                return true;
        else
                return false;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If true, set the max possible frequency, otherwise set the low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                goto out;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;

                                ret = clk_set_rate(clki->clk, clki->max_freq);
                                if (ret) {
                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
                                                __func__, clki->name,
                                                clki->max_freq, ret);
                                        break;
                                }
                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
                                                "scaled up", clki->name,
                                                clki->curr_freq,
                                                clki->max_freq);

                                clki->curr_freq = clki->max_freq;

                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;

                                ret = clk_set_rate(clki->clk, clki->min_freq);
                                if (ret) {
                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
                                                __func__, clki->name,
                                                clki->min_freq, ret);
                                        break;
                                }
                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
                                                "scaled down", clki->name,
                                                clki->curr_freq,
                                                clki->min_freq);
                                clki->curr_freq = clki->min_freq;
                        }
                }
                dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
                                clki->name, clk_get_rate(clki->clk));
        }

out:
        return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;
        ktime_t start = ktime_get();

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
        if (ret)
                goto out;

        ret = ufshcd_set_clk_freq(hba, scale_up);
        if (ret)
                goto out;

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
        if (ret)
                ufshcd_set_clk_freq(hba, !scale_up);

out:
        trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
                        (scale_up ? "up" : "down"),
                        ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
                                               bool scale_up)
{
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                return false;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
                                return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
                                return true;
                        }
                }
        }

        return false;
}

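/**
 * ufshcd_wait_for_doorbell_clr - wait for the task management and transfer
 *                                request doorbell registers to go idle
 * @hba: per adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Returns 0 once both doorbells are clear, -EBUSY if the host leaves the
 * operational state or the timeout expires first.
 */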
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
                                        u64 wait_timeout_us)
{
        unsigned long flags;
        int ret = 0;
        u32 tm_doorbell;
        u32 tr_doorbell;
        bool timeout = false, do_last_check = false;
        ktime_t start;

        ufshcd_hold(hba, false);
        spin_lock_irqsave(hba->host->host_lock, flags);
        /*
         * Wait for all the outstanding tasks/transfer requests.
         * Verify by checking the doorbell registers are clear.
         */
        start = ktime_get();
        do {
                if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
                        ret = -EBUSY;
                        goto out;
                }

                tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
                tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                if (!tm_doorbell && !tr_doorbell) {
                        timeout = false;
                        break;
                } else if (do_last_check) {
                        break;
                }

                spin_unlock_irqrestore(hba->host->host_lock, flags);
                schedule();
                if (ktime_to_us(ktime_sub(ktime_get(), start)) >
                    wait_timeout_us) {
                        timeout = true;
                        /*
                         * We might have been scheduled out for a long time,
                         * so check whether the doorbells have been cleared
                         * in the meantime.
                         */
                        do_last_check = true;
                }
                spin_lock_irqsave(hba->host->host_lock, flags);
        } while (tm_doorbell || tr_doorbell);

        if (timeout) {
                dev_err(hba->dev,
1121                         "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
                        __func__, tm_doorbell, tr_doorbell);
                ret = -EBUSY;
        }
out:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_release(hba);
        return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;
        struct ufs_pa_layer_attr new_pwr_info;

        if (scale_up) {
                memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
                       sizeof(struct ufs_pa_layer_attr));
        } else {
                memcpy(&new_pwr_info, &hba->pwr_info,
                       sizeof(struct ufs_pa_layer_attr));

                if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
                    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
                        /* save the current power mode */
                        memcpy(&hba->clk_scaling.saved_pwr_info.info,
                                &hba->pwr_info,
                                sizeof(struct ufs_pa_layer_attr));

                        /* scale down gear */
                        new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
                        new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
                }
        }

        /* check if the power mode needs to be changed */
        ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
        if (ret)
                dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
                        __func__, ret,
                        hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
                        new_pwr_info.gear_tx, new_pwr_info.gear_rx);

        return ret;
}

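/**
 * ufshcd_clock_scaling_prepare - block further I/O and wait for the
 *                                doorbells to drain before clock scaling
 * @hba: per adapter instance
 *
 * Returns 0 on success, -EBUSY if clock scaling is not allowed or the
 * outstanding requests do not complete in time.
 */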
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
        #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
        int ret = 0;
        /*
         * make sure that there are no outstanding requests when
         * clock scaling is in progress
         */
        ufshcd_scsi_block_requests(hba);
        down_write(&hba->clk_scaling_lock);

        if (!hba->clk_scaling.is_allowed ||
            ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
                ret = -EBUSY;
                up_write(&hba->clk_scaling_lock);
                ufshcd_scsi_unblock_requests(hba);
                goto out;
        }

        /* let's not get into low power until clock scaling is completed */
        ufshcd_hold(hba, false);

out:
        return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
{
        if (writelock)
                up_write(&hba->clk_scaling_lock);
        else
                up_read(&hba->clk_scaling_lock);
        ufshcd_scsi_unblock_requests(hba);
        ufshcd_release(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;
        bool is_writelock = true;

        ret = ufshcd_clock_scaling_prepare(hba);
        if (ret)
                return ret;

        /* scale down the gear before scaling down clocks */
        if (!scale_up) {
                ret = ufshcd_scale_gear(hba, false);
                if (ret)
                        goto out_unprepare;
        }

        ret = ufshcd_scale_clks(hba, scale_up);
        if (ret) {
                if (!scale_up)
                        ufshcd_scale_gear(hba, true);
                goto out_unprepare;
        }

        /* scale up the gear after scaling up clocks */
        if (scale_up) {
                ret = ufshcd_scale_gear(hba, true);
                if (ret) {
                        ufshcd_scale_clks(hba, false);
                        goto out_unprepare;
                }
        }

        /* Enable Write Booster if we have scaled up else disable it */
        downgrade_write(&hba->clk_scaling_lock);
        is_writelock = false;
        ufshcd_wb_toggle(hba, scale_up);

out_unprepare:
        ufshcd_clock_scaling_unprepare(hba, is_writelock);
        return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.suspend_work);
        unsigned long irq_flags;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return;
        }
        hba->clk_scaling.is_suspended = true;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        __ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.resume_work);
        unsigned long irq_flags;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (!hba->clk_scaling.is_suspended) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return;
        }
        hba->clk_scaling.is_suspended = false;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        devfreq_resume_device(hba->devfreq);
}

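/**
 * ufshcd_devfreq_target - devfreq target callback that scales the UFS
 *                         clocks and gear to the requested frequency
 * @dev: pointer to the device
 * @freq: requested frequency, rounded to the closest supported rate
 * @flags: devfreq flags (unused)
 *
 * Returns 0 on success or when no scaling is required, a negative errno
 * otherwise.
 */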
static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
{
        int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start;
        bool scale_up, sched_clk_scaling_suspend_work = false;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        unsigned long irq_flags;

        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;

        clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
        /* Override with the closest supported frequency */
        *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return 0;
        }

        if (!hba->clk_scaling.active_reqs)
                sched_clk_scaling_suspend_work = true;

        if (list_empty(clk_list)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                goto out;
        }

        /* Decide based on the rounded-off frequency and update */
        scale_up = (*freq == clki->max_freq) ? true : false;
        if (!scale_up)
                *freq = clki->min_freq;
        /* Update the frequency */
        if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                ret = 0;
                goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        start = ktime_get();
        ret = ufshcd_devfreq_scale(hba, scale_up);

        trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
                (scale_up ? "up" : "down"),
                ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
        if (sched_clk_scaling_suspend_work)
                queue_work(hba->clk_scaling.workq,
                           &hba->clk_scaling.suspend_work);

        return ret;
}

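/* blk_mq_tagset_busy_iter() callback: count each in-flight request */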
static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
        int *busy = priv;

        WARN_ON_ONCE(reserved);
        (*busy)++;
        return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
        struct request_queue *q = hba->cmd_queue;
        int busy = 0;

        blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
        return busy;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *stat)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        ktime_t curr_t;

        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;

        memset(stat, 0, sizeof(*stat));

        spin_lock_irqsave(hba->host->host_lock, flags);
        curr_t = ktime_get();
        if (!scaling->window_start_t)
                goto start_window;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        /*
         * If the current frequency is 0, the ondemand governor assumes that
         * no initial frequency has been set and always requests the maximum
         * frequency.
         */
        stat->current_frequency = clki->curr_freq;
        if (scaling->is_busy_started)
                scaling->tot_busy_t += ktime_us_delta(curr_t,
                                scaling->busy_start_t);

        stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
        stat->busy_time = scaling->tot_busy_t;
start_window:
        scaling->window_start_t = curr_t;
        scaling->tot_busy_t = 0;

        if (hba->outstanding_reqs) {
                scaling->busy_start_t = curr_t;
                scaling->is_busy_started = true;
        } else {
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return 0;
}

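/**
 * ufshcd_devfreq_init - register the first clock in the clock list with
 *                       devfreq, using the simple ondemand governor
 * @hba: per adapter instance
 *
 * Returns 0 on success (or when there are no clocks to scale), a negative
 * errno if devfreq registration fails.
 */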
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        struct devfreq *devfreq;
        int ret;

        /* Skip devfreq if we don't have any clocks in the list */
        if (list_empty(clk_list))
                return 0;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        dev_pm_opp_add(hba->dev, clki->min_freq, 0);
        dev_pm_opp_add(hba->dev, clki->max_freq, 0);

        ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
                                         &hba->vps->ondemand_data);
        devfreq = devfreq_add_device(hba->dev,
                        &hba->vps->devfreq_profile,
                        DEVFREQ_GOV_SIMPLE_ONDEMAND,
                        &hba->vps->ondemand_data);
        if (IS_ERR(devfreq)) {
                ret = PTR_ERR(devfreq);
                dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

                dev_pm_opp_remove(hba->dev, clki->min_freq);
                dev_pm_opp_remove(hba->dev, clki->max_freq);
                return ret;
        }

        hba->devfreq = devfreq;

        return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;

        if (!hba->devfreq)
                return;

        devfreq_remove_device(hba->devfreq);
        hba->devfreq = NULL;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        dev_pm_opp_remove(hba->dev, clki->min_freq);
        dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;

        devfreq_suspend_device(hba->devfreq);
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_scaling.window_start_t = 0;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;
        bool suspend = false;

        cancel_work_sync(&hba->clk_scaling.suspend_work);
        cancel_work_sync(&hba->clk_scaling.resume_work);

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (!hba->clk_scaling.is_suspended) {
                suspend = true;
                hba->clk_scaling.is_suspended = true;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (suspend)
                __ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;
        bool resume = false;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_scaling.is_suspended) {
                resume = true;
                hba->clk_scaling.is_suspended = false;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (resume)
                devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        u32 value;
        int err = 0;

        if (kstrtou32(buf, 0, &value))
                return -EINVAL;

        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                err = -EBUSY;
                goto out;
        }

        value = !!value;
        if (value == hba->clk_scaling.is_enabled)
                goto out;
1545
1546         ufshcd_rpm_get_sync(hba);
1547         ufshcd_hold(hba, false);
1548
1549         hba->clk_scaling.is_enabled = value;
1550
1551         if (value) {
1552                 ufshcd_resume_clkscaling(hba);
1553         } else {
1554                 ufshcd_suspend_clkscaling(hba);
1555                 err = ufshcd_devfreq_scale(hba, true);
1556                 if (err)
1557                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1558                                         __func__, err);
1559         }
1560
1561         ufshcd_release(hba);
1562         ufshcd_rpm_put_sync(hba);
1563 out:
1564         up(&hba->host_sem);
1565         return err ? err : count;
1566 }
1567
1568 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1569 {
1570         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1571         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1572         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1573         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1574         hba->clk_scaling.enable_attr.attr.mode = 0644;
1575         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1576                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1577 }
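/*
 * Note (illustrative): the attribute above surfaces as "clkscale_enable"
 * under the host controller's device directory in sysfs; writing 0 or 1
 * toggles UFS clock scaling at runtime, with 0 also forcing a scale-up so
 * the device is left at the maximum frequency.
 */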
1578
1579 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1580 {
1581         if (hba->clk_scaling.enable_attr.attr.name)
1582                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1583 }
1584
1585 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1586 {
1587         char wq_name[sizeof("ufs_clkscaling_00")];
1588
1589         if (!ufshcd_is_clkscaling_supported(hba))
1590                 return;
1591
1592         if (!hba->clk_scaling.min_gear)
1593                 hba->clk_scaling.min_gear = UFS_HS_G1;
1594
1595         INIT_WORK(&hba->clk_scaling.suspend_work,
1596                   ufshcd_clk_scaling_suspend_work);
1597         INIT_WORK(&hba->clk_scaling.resume_work,
1598                   ufshcd_clk_scaling_resume_work);
1599
1600         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1601                  hba->host->host_no);
1602         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1603
1604         hba->clk_scaling.is_initialized = true;
1605 }
1606
1607 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1608 {
1609         if (!hba->clk_scaling.is_initialized)
1610                 return;
1611
1612         ufshcd_remove_clk_scaling_sysfs(hba);
1613         destroy_workqueue(hba->clk_scaling.workq);
1614         ufshcd_devfreq_remove(hba);
1615         hba->clk_scaling.is_initialized = false;
1616 }
1617
1618 static void ufshcd_ungate_work(struct work_struct *work)
1619 {
1620         int ret;
1621         unsigned long flags;
1622         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1623                         clk_gating.ungate_work);
1624
1625         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1626
1627         spin_lock_irqsave(hba->host->host_lock, flags);
1628         if (hba->clk_gating.state == CLKS_ON) {
1629                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1630                 goto unblock_reqs;
1631         }
1632
1633         spin_unlock_irqrestore(hba->host->host_lock, flags);
1634         ufshcd_hba_vreg_set_hpm(hba);
1635         ufshcd_setup_clocks(hba, true);
1636
1637         ufshcd_enable_irq(hba);
1638
1639         /* Exit from hibern8 */
1640         if (ufshcd_can_hibern8_during_gating(hba)) {
1641                 /* Prevent gating in this path */
1642                 hba->clk_gating.is_suspended = true;
1643                 if (ufshcd_is_link_hibern8(hba)) {
1644                         ret = ufshcd_uic_hibern8_exit(hba);
1645                         if (ret)
1646                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1647                                         __func__, ret);
1648                         else
1649                                 ufshcd_set_link_active(hba);
1650                 }
1651                 hba->clk_gating.is_suspended = false;
1652         }
1653 unblock_reqs:
1654         ufshcd_scsi_unblock_requests(hba);
1655 }
1656
1657 /**
1658  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1659  * Also, exit from hibern8 mode and set the link as active.
1660  * @hba: per adapter instance
1661  * @async: This indicates whether caller should ungate clocks asynchronously.
1662  */
1663 int ufshcd_hold(struct ufs_hba *hba, bool async)
1664 {
1665         int rc = 0;
1666         bool flush_result;
1667         unsigned long flags;
1668
1669         if (!ufshcd_is_clkgating_allowed(hba))
1670                 goto out;
1671         spin_lock_irqsave(hba->host->host_lock, flags);
1672         hba->clk_gating.active_reqs++;
1673
1674 start:
1675         switch (hba->clk_gating.state) {
1676         case CLKS_ON:
1677                 /*
1678                  * Wait for the ungate work to complete if in progress.
1679                  * Though the clocks may be in ON state, the link could
1680                  * still be in hibern8 state if hibern8 is allowed
1681                  * during clock gating.
1682                  * Make sure we also exit hibern8, in addition to the
1683                  * clocks being ON.
1684                  */
1685                 if (ufshcd_can_hibern8_during_gating(hba) &&
1686                     ufshcd_is_link_hibern8(hba)) {
1687                         if (async) {
1688                                 rc = -EAGAIN;
1689                                 hba->clk_gating.active_reqs--;
1690                                 break;
1691                         }
1692                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1693                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1694                         if (hba->clk_gating.is_suspended && !flush_result)
1695                                 goto out;
1696                         spin_lock_irqsave(hba->host->host_lock, flags);
1697                         goto start;
1698                 }
1699                 break;
1700         case REQ_CLKS_OFF:
1701                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1702                         hba->clk_gating.state = CLKS_ON;
1703                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1704                                                 hba->clk_gating.state);
1705                         break;
1706                 }
1707                 /*
1708                  * If we are here, it means gating work is either done or
1709                  * currently running. Hence, fall through to cancel gating
1710                  * work and to enable clocks.
1711                  */
1712                 fallthrough;
1713         case CLKS_OFF:
1714                 hba->clk_gating.state = REQ_CLKS_ON;
1715                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1716                                         hba->clk_gating.state);
1717                 if (queue_work(hba->clk_gating.clk_gating_workq,
1718                                &hba->clk_gating.ungate_work))
1719                         ufshcd_scsi_block_requests(hba);
1720                 /*
1721                  * fall through to check if we should wait for this
1722                  * work to be done or not.
1723                  */
1724                 fallthrough;
1725         case REQ_CLKS_ON:
1726                 if (async) {
1727                         rc = -EAGAIN;
1728                         hba->clk_gating.active_reqs--;
1729                         break;
1730                 }
1731
1732                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1733                 flush_work(&hba->clk_gating.ungate_work);
1734                 /* Make sure state is CLKS_ON before returning */
1735                 spin_lock_irqsave(hba->host->host_lock, flags);
1736                 goto start;
1737         default:
1738                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1739                                 __func__, hba->clk_gating.state);
1740                 break;
1741         }
1742         spin_unlock_irqrestore(hba->host->host_lock, flags);
1743 out:
1744         return rc;
1745 }
1746 EXPORT_SYMBOL_GPL(ufshcd_hold);
1747
1748 static void ufshcd_gate_work(struct work_struct *work)
1749 {
1750         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1751                         clk_gating.gate_work.work);
1752         unsigned long flags;
1753         int ret;
1754
1755         spin_lock_irqsave(hba->host->host_lock, flags);
1756          * If a request to ungate arrived while this work was pending,
1757          * the gating state will have been marked REQ_CLKS_ON. In that
1758          * case save time by skipping the gating work and exiting after
1759          * changing the clock state to CLKS_ON.
1760          * state to CLKS_ON.
1761          */
1762         if (hba->clk_gating.is_suspended ||
1763                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1764                 hba->clk_gating.state = CLKS_ON;
1765                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1766                                         hba->clk_gating.state);
1767                 goto rel_lock;
1768         }
1769
1770         if (hba->clk_gating.active_reqs
1771                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1772                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1773                 || hba->active_uic_cmd || hba->uic_async_done)
1774                 goto rel_lock;
1775
1776         spin_unlock_irqrestore(hba->host->host_lock, flags);
1777
1778         /* put the link into hibern8 mode before turning off clocks */
1779         if (ufshcd_can_hibern8_during_gating(hba)) {
1780                 ret = ufshcd_uic_hibern8_enter(hba);
1781                 if (ret) {
1782                         hba->clk_gating.state = CLKS_ON;
1783                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1784                                         __func__, ret);
1785                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1786                                                 hba->clk_gating.state);
1787                         goto out;
1788                 }
1789                 ufshcd_set_link_hibern8(hba);
1790         }
1791
1792         ufshcd_disable_irq(hba);
1793
1794         ufshcd_setup_clocks(hba, false);
1795
1796         /* Put the host controller in low power mode if possible */
1797         ufshcd_hba_vreg_set_lpm(hba);
1798         /*
1799          * If a request to ungate arrived while this work was running,
1800          * the gating state will have been marked REQ_CLKS_ON. In that
1801          * case keep the state as REQ_CLKS_ON, which still implies that
1802          * clocks are off and a request to turn them on is pending. This
1803          * keeps the state machine intact and ultimately prevents the
1804          * cancel work from being done multiple times when new requests
1805          * arrive before the current cancel work is done.
1806          */
1807         spin_lock_irqsave(hba->host->host_lock, flags);
1808         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1809                 hba->clk_gating.state = CLKS_OFF;
1810                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1811                                         hba->clk_gating.state);
1812         }
1813 rel_lock:
1814         spin_unlock_irqrestore(hba->host->host_lock, flags);
1815 out:
1816         return;
1817 }
1818
1819 /* host lock must be held before calling this variant */
1820 static void __ufshcd_release(struct ufs_hba *hba)
1821 {
1822         if (!ufshcd_is_clkgating_allowed(hba))
1823                 return;
1824
1825         hba->clk_gating.active_reqs--;
1826
1827         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1828             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1829             hba->outstanding_tasks ||
1830             hba->active_uic_cmd || hba->uic_async_done ||
1831             hba->clk_gating.state == CLKS_OFF)
1832                 return;
1833
1834         hba->clk_gating.state = REQ_CLKS_OFF;
1835         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1836         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1837                            &hba->clk_gating.gate_work,
1838                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1839 }
1840
1841 void ufshcd_release(struct ufs_hba *hba)
1842 {
1843         unsigned long flags;
1844
1845         spin_lock_irqsave(hba->host->host_lock, flags);
1846         __ufshcd_release(hba);
1847         spin_unlock_irqrestore(hba->host->host_lock, flags);
1848 }
1849 EXPORT_SYMBOL_GPL(ufshcd_release);
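/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): callers bracket controller access with ufshcd_hold() and
 * ufshcd_release() so that gate_work cannot switch the clocks off in
 * between.
 */
#if 0	/* example only */
static int example_access_hba(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hold(hba, false);	/* sync: waits for ungate_work if needed */
	if (err)
		return err;

	/* ... host controller registers may be accessed safely here ... */

	ufshcd_release(hba);	/* may arm gate_work after delay_ms expires */
	return 0;
}
#endif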
1850
1851 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1852                 struct device_attribute *attr, char *buf)
1853 {
1854         struct ufs_hba *hba = dev_get_drvdata(dev);
1855
1856         return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1857 }
1858
1859 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1860                 struct device_attribute *attr, const char *buf, size_t count)
1861 {
1862         struct ufs_hba *hba = dev_get_drvdata(dev);
1863         unsigned long flags, value;
1864
1865         if (kstrtoul(buf, 0, &value))
1866                 return -EINVAL;
1867
1868         spin_lock_irqsave(hba->host->host_lock, flags);
1869         hba->clk_gating.delay_ms = value;
1870         spin_unlock_irqrestore(hba->host->host_lock, flags);
1871         return count;
1872 }
1873
1874 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1875                 struct device_attribute *attr, char *buf)
1876 {
1877         struct ufs_hba *hba = dev_get_drvdata(dev);
1878
1879         return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1880 }
1881
1882 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1883                 struct device_attribute *attr, const char *buf, size_t count)
1884 {
1885         struct ufs_hba *hba = dev_get_drvdata(dev);
1886         unsigned long flags;
1887         u32 value;
1888
1889         if (kstrtou32(buf, 0, &value))
1890                 return -EINVAL;
1891
1892         value = !!value;
1893
1894         spin_lock_irqsave(hba->host->host_lock, flags);
1895         if (value == hba->clk_gating.is_enabled)
1896                 goto out;
1897
1898         if (value)
1899                 __ufshcd_release(hba);
1900         else
1901                 hba->clk_gating.active_reqs++;
1902
1903         hba->clk_gating.is_enabled = value;
1904 out:
1905         spin_unlock_irqrestore(hba->host->host_lock, flags);
1906         return count;
1907 }
1908
1909 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1910 {
1911         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1912         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1913         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1914         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1915         hba->clk_gating.delay_attr.attr.mode = 0644;
1916         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1917                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1918
1919         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1920         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1921         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1922         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1923         hba->clk_gating.enable_attr.attr.mode = 0644;
1924         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1925                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1926 }
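/*
 * Note (illustrative): the attributes above surface as "clkgate_delay_ms"
 * and "clkgate_enable" in sysfs; a larger delay keeps the clocks running
 * longer after the last request before gate_work is allowed to run.
 */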
1927
1928 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
1929 {
1930         if (hba->clk_gating.delay_attr.attr.name)
1931                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1932         if (hba->clk_gating.enable_attr.attr.name)
1933                 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1934 }
1935
1936 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1937 {
1938         char wq_name[sizeof("ufs_clk_gating_00")];
1939
1940         if (!ufshcd_is_clkgating_allowed(hba))
1941                 return;
1942
1943         hba->clk_gating.state = CLKS_ON;
1944
1945         hba->clk_gating.delay_ms = 150;
1946         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1947         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1948
1949         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1950                  hba->host->host_no);
1951         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1952                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
1953
1954         ufshcd_init_clk_gating_sysfs(hba);
1955
1956         hba->clk_gating.is_enabled = true;
1957         hba->clk_gating.is_initialized = true;
1958 }
1959
1960 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1961 {
1962         if (!hba->clk_gating.is_initialized)
1963                 return;
1964         ufshcd_remove_clk_gating_sysfs(hba);
1965         cancel_work_sync(&hba->clk_gating.ungate_work);
1966         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1967         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1968         hba->clk_gating.is_initialized = false;
1969 }
1970
1971 /* Must be called with host lock acquired */
1972 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1973 {
1974         bool queue_resume_work = false;
1975         ktime_t curr_t = ktime_get();
1976         unsigned long flags;
1977
1978         if (!ufshcd_is_clkscaling_supported(hba))
1979                 return;
1980
1981         spin_lock_irqsave(hba->host->host_lock, flags);
1982         if (!hba->clk_scaling.active_reqs++)
1983                 queue_resume_work = true;
1984
1985         if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
1986                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1987                 return;
1988         }
1989
1990         if (queue_resume_work)
1991                 queue_work(hba->clk_scaling.workq,
1992                            &hba->clk_scaling.resume_work);
1993
1994         if (!hba->clk_scaling.window_start_t) {
1995                 hba->clk_scaling.window_start_t = curr_t;
1996                 hba->clk_scaling.tot_busy_t = 0;
1997                 hba->clk_scaling.is_busy_started = false;
1998         }
1999
2000         if (!hba->clk_scaling.is_busy_started) {
2001                 hba->clk_scaling.busy_start_t = curr_t;
2002                 hba->clk_scaling.is_busy_started = true;
2003         }
2004         spin_unlock_irqrestore(hba->host->host_lock, flags);
2005 }
2006
2007 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2008 {
2009         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2010         unsigned long flags;
2011
2012         if (!ufshcd_is_clkscaling_supported(hba))
2013                 return;
2014
2015         spin_lock_irqsave(hba->host->host_lock, flags);
2016         hba->clk_scaling.active_reqs--;
2017         if (!hba->outstanding_reqs && scaling->is_busy_started) {
2018                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2019                                         scaling->busy_start_t));
2020                 scaling->busy_start_t = 0;
2021                 scaling->is_busy_started = false;
2022         }
2023         spin_unlock_irqrestore(hba->host->host_lock, flags);
2024 }
2025
2026 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2027 {
2028         if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2029                 return READ;
2030         else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2031                 return WRITE;
2032         else
2033                 return -EINVAL;
2034 }
2035
2036 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2037                                                 struct ufshcd_lrb *lrbp)
2038 {
2039         struct ufs_hba_monitor *m = &hba->monitor;
2040
2041         return (m->enabled && lrbp && lrbp->cmd &&
2042                 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2043                 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2044 }
2045
2046 static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2047 {
2048         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2049         unsigned long flags;
2050
2051         spin_lock_irqsave(hba->host->host_lock, flags);
2052         if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2053                 hba->monitor.busy_start_ts[dir] = ktime_get();
2054         spin_unlock_irqrestore(hba->host->host_lock, flags);
2055 }
2056
2057 static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2058 {
2059         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2060         unsigned long flags;
2061
2062         spin_lock_irqsave(hba->host->host_lock, flags);
2063         if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2064                 struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2065                 struct ufs_hba_monitor *m = &hba->monitor;
2066                 ktime_t now, inc, lat;
2067
2068                 now = lrbp->compl_time_stamp;
2069                 inc = ktime_sub(now, m->busy_start_ts[dir]);
2070                 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2071                 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2072
2073                 /* Update latencies */
2074                 m->nr_req[dir]++;
2075                 lat = ktime_sub(now, lrbp->issue_time_stamp);
2076                 m->lat_sum[dir] += lat;
2077                 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2078                         m->lat_max[dir] = lat;
2079                 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2080                         m->lat_min[dir] = lat;
2081
2082                 m->nr_queued[dir]--;
2083                 /* Push forward the busy start of monitor */
2084                 m->busy_start_ts[dir] = now;
2085         }
2086         spin_unlock_irqrestore(hba->host->host_lock, flags);
2087 }
2088
2089 /**
2090  * ufshcd_send_command - Send SCSI or device management commands
2091  * @hba: per adapter instance
2092  * @task_tag: Task tag of the command
2093  */
2094 static inline
2095 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2096 {
2097         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2098         unsigned long flags;
2099
2100         lrbp->issue_time_stamp = ktime_get();
2101         lrbp->compl_time_stamp = ktime_set(0, 0);
2102         ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2103         ufshcd_clk_scaling_start_busy(hba);
2104         if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2105                 ufshcd_start_monitor(hba, lrbp);
2106
2107         spin_lock_irqsave(&hba->outstanding_lock, flags);
2108         if (hba->vops && hba->vops->setup_xfer_req)
2109                 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
2110         __set_bit(task_tag, &hba->outstanding_reqs);
2111         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2112         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2113
2114         /* Make sure that doorbell is committed immediately */
2115         wmb();
2116 }
2117
2118 /**
2119  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2120  * @lrbp: pointer to local reference block
2121  */
2122 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2123 {
2124         int len;
2125         if (lrbp->sense_buffer &&
2126             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2127                 int len_to_copy;
2128
2129                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2130                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2131
2132                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2133                        len_to_copy);
2134         }
2135 }
2136
2137 /**
2138  * ufshcd_copy_query_response() - Copy the Query Response and the data
2139  * descriptor
2140  * @hba: per adapter instance
2141  * @lrbp: pointer to local reference block
2142  */
2143 static
2144 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2145 {
2146         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2147
2148         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2149
2150         /* Get the descriptor */
2151         if (hba->dev_cmd.query.descriptor &&
2152             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2153                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2154                                 GENERAL_UPIU_REQUEST_SIZE;
2155                 u16 resp_len;
2156                 u16 buf_len;
2157
2158                 /* data segment length */
2159                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2160                                                 MASK_QUERY_DATA_SEG_LEN;
2161                 buf_len = be16_to_cpu(
2162                                 hba->dev_cmd.query.request.upiu_req.length);
2163                 if (likely(buf_len >= resp_len)) {
2164                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2165                 } else {
2166                         dev_warn(hba->dev,
2167                                  "%s: rsp size %d is bigger than buffer size %d\n",
2168                                  __func__, resp_len, buf_len);
2169                         return -EINVAL;
2170                 }
2171         }
2172
2173         return 0;
2174 }
2175
2176 /**
2177  * ufshcd_hba_capabilities - Read controller capabilities
2178  * @hba: per adapter instance
2179  *
2180  * Return: 0 on success, negative on error.
2181  */
2182 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2183 {
2184         int err;
2185
2186         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2187
2188         /* nutrs and nutmrs are 0 based values */
2189         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2190         hba->nutmrs =
2191         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2192
2193         /* Read crypto capabilities */
2194         err = ufshcd_hba_init_crypto_capabilities(hba);
2195         if (err)
2196                 dev_err(hba->dev, "crypto setup failed\n");
2197
2198         return err;
2199 }
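/*
 * Worked example (illustrative): a controller reporting 0x1F in bits 4:0
 * of the capabilities register supports 31 + 1 = 32 transfer request
 * slots, and 0x7 in bits 18:16 yields 7 + 1 = 8 task management slots.
 */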
2200
2201 /**
2202  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2203  *                            to accept UIC commands
2204  * @hba: per adapter instance
2205  * Returns true if the controller is ready to accept UIC commands, else false
2206  */
2207 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2208 {
2209         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
2213 }
2214
2215 /**
2216  * ufshcd_get_upmcrs - Get the power mode change request status
2217  * @hba: Pointer to adapter instance
2218  *
2219  * This function gets the UPMCRS field of the HCS register.
2220  * Returns the value of the UPMCRS field.
2221  */
2222 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2223 {
2224         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2225 }
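/*
 * Note (illustrative): UPMCRS occupies bits 10:8 of the HCS register.
 * PWR_LOCAL (1) denotes a successful local power mode change; values such
 * as PWR_BUSY or PWR_FATAL_ERROR are treated as failures by the
 * power-mode-change path.
 */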
2226
2227 /**
2228  * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
2229  * @hba: per adapter instance
2230  * @uic_cmd: UIC command
2231  */
2232 static inline void
2233 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2234 {
2235         lockdep_assert_held(&hba->uic_cmd_mutex);
2236
2237         WARN_ON(hba->active_uic_cmd);
2238
2239         hba->active_uic_cmd = uic_cmd;
2240
2241         /* Write Args */
2242         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2243         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2244         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2245
2246         ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2247
2248         /* Write UIC Cmd */
2249         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2250                       REG_UIC_COMMAND);
2251 }
2252
2253 /**
2254  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2255  * @hba: per adapter instance
2256  * @uic_cmd: UIC command
2257  *
2258  * Returns 0 only on success.
2259  */
2260 static int
2261 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2262 {
2263         int ret;
2264         unsigned long flags;
2265
2266         lockdep_assert_held(&hba->uic_cmd_mutex);
2267
2268         if (wait_for_completion_timeout(&uic_cmd->done,
2269                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2270                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2271         } else {
2272                 ret = -ETIMEDOUT;
2273                 dev_err(hba->dev,
2274                         "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2275                         uic_cmd->command, uic_cmd->argument3);
2276
2277                 if (!uic_cmd->cmd_active) {
2278                         dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2279                                 __func__);
2280                         ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2281                 }
2282         }
2283
2284         spin_lock_irqsave(hba->host->host_lock, flags);
2285         hba->active_uic_cmd = NULL;
2286         spin_unlock_irqrestore(hba->host->host_lock, flags);
2287
2288         return ret;
2289 }
2290
2291 /**
2292  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2293  * @hba: per adapter instance
2294  * @uic_cmd: UIC command
2295  * @completion: initialize the completion only if this is set to true
2296  *
2297  * Returns 0 only on success.
2298  */
2299 static int
2300 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2301                       bool completion)
2302 {
2303         lockdep_assert_held(&hba->uic_cmd_mutex);
2304         lockdep_assert_held(hba->host->host_lock);
2305
2306         if (!ufshcd_ready_for_uic_cmd(hba)) {
2307                 dev_err(hba->dev,
2308                         "Controller not ready to accept UIC commands\n");
2309                 return -EIO;
2310         }
2311
2312         if (completion)
2313                 init_completion(&uic_cmd->done);
2314
2315         uic_cmd->cmd_active = 1;
2316         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2317
2318         return 0;
2319 }
2320
2321 /**
2322  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2323  * @hba: per adapter instance
2324  * @uic_cmd: UIC command
2325  *
2326  * Returns 0 only on success.
2327  */
2328 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2329 {
2330         int ret;
2331         unsigned long flags;
2332
2333         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2334                 return 0;
2335
2336         ufshcd_hold(hba, false);
2337         mutex_lock(&hba->uic_cmd_mutex);
2338         ufshcd_add_delay_before_dme_cmd(hba);
2339
2340         spin_lock_irqsave(hba->host->host_lock, flags);
2341         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2342         spin_unlock_irqrestore(hba->host->host_lock, flags);
2343         if (!ret)
2344                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2345
2346         mutex_unlock(&hba->uic_cmd_mutex);
2347
2348         ufshcd_release(hba);
2349         return ret;
2350 }
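/*
 * Illustrative sketch (not part of the driver; mirrors the pattern used by
 * ufshcd_dme_get_attr() elsewhere in this file): reading a DME attribute
 * via ufshcd_send_uic_cmd(). The helper name example_dme_get() is
 * hypothetical.
 */
#if 0	/* example only */
static int example_dme_get(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;	/* e.g. UIC_ARG_MIB(PA_PWRMODE) */

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && mib_val)
		*mib_val = uic_cmd.argument3;	/* value is returned in arg3 */

	return ret;
}
#endif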
2351
2352 /**
2353  * ufshcd_map_sg - Map scatter-gather list to prdt
2354  * @hba: per adapter instance
2355  * @lrbp: pointer to local reference block
2356  *
2357  * Returns 0 in case of success, non-zero value in case of failure
2358  */
2359 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2360 {
2361         struct ufshcd_sg_entry *prd_table;
2362         struct scatterlist *sg;
2363         struct scsi_cmnd *cmd;
2364         int sg_segments;
2365         int i;
2366
2367         cmd = lrbp->cmd;
2368         sg_segments = scsi_dma_map(cmd);
2369         if (sg_segments < 0)
2370                 return sg_segments;
2371
2372         if (sg_segments) {
2373
2374                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2375                         lrbp->utr_descriptor_ptr->prd_table_length =
2376                                 cpu_to_le16((sg_segments *
2377                                         sizeof(struct ufshcd_sg_entry)));
2378                 else
2379                         lrbp->utr_descriptor_ptr->prd_table_length =
2380                                 cpu_to_le16(sg_segments);
2381
2382                 prd_table = lrbp->ucd_prdt_ptr;
2383
2384                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2385                         const unsigned int len = sg_dma_len(sg);
2386
2387                         /*
2388                          * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2389                          * based value that indicates the length, in bytes, of
2390                          * the data block. A maximum of length of 256KB may
2391                          * exist for any entry. Bits 1:0 of this field shall be
2392                          * 11b to indicate Dword granularity. A value of '3'
2393                          * indicates 4 bytes, '7' indicates 8 bytes, etc."
2394                          */
2395                         WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
2396                         prd_table[i].size = cpu_to_le32(len - 1);
2397                         prd_table[i].addr = cpu_to_le64(sg->dma_address);
2398                         prd_table[i].reserved = 0;
2399                 }
2400         } else {
2401                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2402         }
2403
2404         return 0;
2405 }
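/*
 * Worked example (illustrative): a 4 KiB segment is encoded as
 * size = 4096 - 1 = 0xFFF, whose bits 1:0 are 11b as the Dword granularity
 * rule quoted above requires; an 8-byte segment encodes as 0x7.
 */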
2406
2407 /**
2408  * ufshcd_enable_intr - enable interrupts
2409  * @hba: per adapter instance
2410  * @intrs: interrupt bits
2411  */
2412 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2413 {
2414         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2415
2416         if (hba->ufs_version == ufshci_version(1, 0)) {
2417                 u32 rw;
2418                 rw = set & INTERRUPT_MASK_RW_VER_10;
2419                 set = rw | ((set ^ intrs) & intrs);
2420         } else {
2421                 set |= intrs;
2422         }
2423
2424         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2425 }
2426
2427 /**
2428  * ufshcd_disable_intr - disable interrupts
2429  * @hba: per adapter instance
2430  * @intrs: interrupt bits
2431  */
2432 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2433 {
2434         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2435
2436         if (hba->ufs_version == ufshci_version(1, 0)) {
2437                 u32 rw;
2438                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2439                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2440                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2441
2442         } else {
2443                 set &= ~intrs;
2444         }
2445
2446         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2447 }
2448
2449 /**
2450  * ufshcd_prepare_req_desc_hdr() - Fills the request's header
2451  * descriptor according to the request
2452  * @lrbp: pointer to local reference block
2453  * @upiu_flags: flags required in the header
2454  * @cmd_dir: the request's data direction
2455  */
2456 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2457                         u8 *upiu_flags, enum dma_data_direction cmd_dir)
2458 {
2459         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2460         u32 data_direction;
2461         u32 dword_0;
2462         u32 dword_1 = 0;
2463         u32 dword_3 = 0;
2464
2465         if (cmd_dir == DMA_FROM_DEVICE) {
2466                 data_direction = UTP_DEVICE_TO_HOST;
2467                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2468         } else if (cmd_dir == DMA_TO_DEVICE) {
2469                 data_direction = UTP_HOST_TO_DEVICE;
2470                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2471         } else {
2472                 data_direction = UTP_NO_DATA_TRANSFER;
2473                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2474         }
2475
2476         dword_0 = data_direction | (lrbp->command_type
2477                                 << UPIU_COMMAND_TYPE_OFFSET);
2478         if (lrbp->intr_cmd)
2479                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2480
2481         /* Prepare crypto related dwords */
2482         ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2483
2484         /* Transfer request descriptor header fields */
2485         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2486         req_desc->header.dword_1 = cpu_to_le32(dword_1);
2487         /*
2488          * Assign an invalid value to the command status. The controller
2489          * updates the OCS field with the actual command status upon
2490          * command completion.
2491          */
2492         req_desc->header.dword_2 =
2493                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2494         req_desc->header.dword_3 = cpu_to_le32(dword_3);
2495
2496         req_desc->prd_table_length = 0;
2497 }
2498
2499 /**
2500  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2501  * for SCSI commands
2502  * @lrbp: local reference block pointer
2503  * @upiu_flags: flags
2504  */
2505 static
2506 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2507 {
2508         struct scsi_cmnd *cmd = lrbp->cmd;
2509         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2510         unsigned short cdb_len;
2511
2512         /* command descriptor fields */
2513         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2514                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2515                                 lrbp->lun, lrbp->task_tag);
2516         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2517                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2518
2519         /* Total EHS length and Data segment length will be zero */
2520         ucd_req_ptr->header.dword_2 = 0;
2521
2522         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2523
2524         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2525         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2526         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2527
2528         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2529 }
2530
2531 /**
2532  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
2533  * for query requests
2534  * @hba: UFS hba
2535  * @lrbp: local reference block pointer
2536  * @upiu_flags: flags
2537  */
2538 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2539                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2540 {
2541         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2542         struct ufs_query *query = &hba->dev_cmd.query;
2543         u16 len = be16_to_cpu(query->request.upiu_req.length);
2544
2545         /* Query request header */
2546         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2547                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2548                         lrbp->lun, lrbp->task_tag);
2549         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2550                         0, query->request.query_func, 0, 0);
2551
2552         /* The data segment length is only needed for WRITE_DESC */
2553         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2554                 ucd_req_ptr->header.dword_2 =
2555                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2556         else
2557                 ucd_req_ptr->header.dword_2 = 0;
2558
2559         /* Copy the Query Request buffer as is */
2560         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2561                         QUERY_OSF_SIZE);
2562
2563         /* Copy the Descriptor */
2564         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2565                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2566
2567         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2568 }
2569
2570 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2571 {
2572         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2573
2574         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2575
2576         /* command descriptor fields */
2577         ucd_req_ptr->header.dword_0 =
2578                 UPIU_HEADER_DWORD(
2579                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2580         /* clear rest of the fields of basic header */
2581         ucd_req_ptr->header.dword_1 = 0;
2582         ucd_req_ptr->header.dword_2 = 0;
2583
2584         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2585 }
2586
2587 /**
2588  * ufshcd_compose_devman_upiu - compose a UFS Protocol Information
2589  *                              Unit (UPIU) for device management purposes
2590  * @hba: per adapter instance
2591  * @lrbp: pointer to local reference block
2592  */
2593 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2594                                       struct ufshcd_lrb *lrbp)
2595 {
2596         u8 upiu_flags;
2597         int ret = 0;
2598
2599         if (hba->ufs_version <= ufshci_version(1, 1))
2600                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2601         else
2602                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2603
2604         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2605         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2606                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2607         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2608                 ufshcd_prepare_utp_nop_upiu(lrbp);
2609         else
2610                 ret = -EINVAL;
2611
2612         return ret;
2613 }
2614
2615 /**
2616  * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information
2617  *                         Unit (UPIU) for SCSI purposes
2618  * @hba: per adapter instance
2619  * @lrbp: pointer to local reference block
2620  */
2621 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2622 {
2623         u8 upiu_flags;
2624         int ret = 0;
2625
2626         if (hba->ufs_version <= ufshci_version(1, 1))
2627                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2628         else
2629                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2630
2631         if (likely(lrbp->cmd)) {
2632                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2633                                                 lrbp->cmd->sc_data_direction);
2634                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2635         } else {
2636                 ret = -EINVAL;
2637         }
2638
2639         return ret;
2640 }
2641
2642 /**
2643  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2644  * @upiu_wlun_id: UPIU W-LUN id
2645  *
2646  * Returns SCSI W-LUN id
2647  */
2648 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2649 {
2650         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2651 }
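/*
 * Worked example (illustrative): UFS_UPIU_UFS_DEVICE_WLUN (0xD0) maps to
 * (0xD0 & ~0x80) | SCSI_W_LUN_BASE (0xC100) = 0xC150, i.e. the SCSI LUN
 * 49488 under which the device W-LUN is exposed to the SCSI midlayer.
 */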
2652
2653 static inline bool is_rpmb_wlun(struct scsi_device *sdev)
2654 {
2655         return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
2656 }
2657
2658 static inline bool is_device_wlun(struct scsi_device *sdev)
2659 {
2660         return sdev->lun ==
2661                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2662 }
2663
2664 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2665 {
2666         struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2667         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2668         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2669                 i * sizeof(struct utp_transfer_cmd_desc);
2670         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2671                                        response_upiu);
2672         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2673
2674         lrb->utr_descriptor_ptr = utrdlp + i;
2675         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2676                 i * sizeof(struct utp_transfer_req_desc);
2677         lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2678         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2679         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2680         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2681         lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
2682         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2683 }
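/*
 * Note (illustrative): each utp_transfer_cmd_desc packs the request UPIU,
 * the response UPIU and the PRDT back to back, so the response and PRDT
 * DMA addresses above are plain byte offsets from the descriptor's base
 * address.
 */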
2684
2685 /**
2686  * ufshcd_queuecommand - main entry point for SCSI requests
2687  * @host: SCSI host pointer
2688  * @cmd: command from SCSI Midlayer
2689  *
2690  * Returns 0 for success, non-zero in case of failure
2691  */
2692 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2693 {
2694         struct ufs_hba *hba = shost_priv(host);
2695         int tag = scsi_cmd_to_rq(cmd)->tag;
2696         struct ufshcd_lrb *lrbp;
2697         int err = 0;
2698
2699         WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
2700
2701         if (!down_read_trylock(&hba->clk_scaling_lock))
2702                 return SCSI_MLQUEUE_HOST_BUSY;
2703
2704         switch (hba->ufshcd_state) {
2705         case UFSHCD_STATE_OPERATIONAL:
2706                 break;
2707         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2708                 /*
2709                  * SCSI error handler can call ->queuecommand() while UFS error
2710                  * handler is in progress. Error interrupts could change the
2711                  * state from UFSHCD_STATE_RESET to
2712                  * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2713                  * from being issued in that case.
2714                  */
2715                 if (ufshcd_eh_in_progress(hba)) {
2716                         err = SCSI_MLQUEUE_HOST_BUSY;
2717                         goto out;
2718                 }
2719                 break;
2720         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2721                 /*
2722                  * pm_runtime_get_sync() is used in the error handling
2723                  * preparation stage. If a SCSI cmd, e.g. the SSU cmd, is sent
2724                  * from the hba's PM ops, it can never finish if we let the
2725                  * SCSI layer keep retrying it; the err handler gets stuck
2726                  * forever. Neither can we let the cmd pass through, since UFS
2727                  * is in a bad state and it may eventually time out, blocking
2728                  * the err handler for too long. So, just fail SCSI cmds sent
2729                  * from PM ops; the err handler can recover the PM error anyway.
2730                  */
2731                 if (hba->pm_op_in_progress) {
2732                         hba->force_reset = true;
2733                         set_host_byte(cmd, DID_BAD_TARGET);
2734                         scsi_done(cmd);
2735                         goto out;
2736                 }
2737                 fallthrough;
2738         case UFSHCD_STATE_RESET:
2739                 err = SCSI_MLQUEUE_HOST_BUSY;
2740                 goto out;
2741         case UFSHCD_STATE_ERROR:
2742                 set_host_byte(cmd, DID_ERROR);
2743                 scsi_done(cmd);
2744                 goto out;
2745         }
2746
2747         hba->req_abort_count = 0;
2748
2749         err = ufshcd_hold(hba, true);
2750         if (err) {
2751                 err = SCSI_MLQUEUE_HOST_BUSY;
2752                 goto out;
2753         }
2754         WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2755                 (hba->clk_gating.state != CLKS_ON));
2756
2757         lrbp = &hba->lrb[tag];
2758         WARN_ON(lrbp->cmd);
2759         lrbp->cmd = cmd;
2760         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2761         lrbp->sense_buffer = cmd->sense_buffer;
2762         lrbp->task_tag = tag;
2763         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2764         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2765
2766         ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
2767
2768         lrbp->req_abort_skip = false;
2769
2770         ufshpb_prep(hba, lrbp);
2771
2772         ufshcd_comp_scsi_upiu(hba, lrbp);
2773
2774         err = ufshcd_map_sg(hba, lrbp);
2775         if (err) {
2776                 lrbp->cmd = NULL;
2777                 ufshcd_release(hba);
2778                 goto out;
2779         }
2780
2781         ufshcd_send_command(hba, tag);
2782 out:
2783         up_read(&hba->clk_scaling_lock);
2784
2785         if (ufs_trigger_eh()) {
2786                 unsigned long flags;
2787
2788                 spin_lock_irqsave(hba->host->host_lock, flags);
2789                 ufshcd_schedule_eh_work(hba);
2790                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2791         }
2792
2793         return err;
2794 }
2795
2796 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2797                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2798 {
2799         lrbp->cmd = NULL;
2800         lrbp->sense_bufflen = 0;
2801         lrbp->sense_buffer = NULL;
2802         lrbp->task_tag = tag;
2803         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2804         lrbp->intr_cmd = true; /* No interrupt aggregation */
2805         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2806         hba->dev_cmd.type = cmd_type;
2807
2808         return ufshcd_compose_devman_upiu(hba, lrbp);
2809 }
2810
2811 static int
2812 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2813 {
2814         int err = 0;
2815         unsigned long flags;
2816         u32 mask = 1 << tag;
2817
2818         /* clear outstanding transaction before retry */
2819         spin_lock_irqsave(hba->host->host_lock, flags);
2820         ufshcd_utrl_clear(hba, tag);
2821         spin_unlock_irqrestore(hba->host->host_lock, flags);
2822
2823         /*
2824          * wait for h/w to clear corresponding bit in door-bell.
2825          * max. wait is 1 sec.
2826          */
2827         err = ufshcd_wait_for_register(hba,
2828                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2829                         mask, ~mask, 1000, 1000);
2830
2831         return err;
2832 }
2833
2834 static int
2835 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2836 {
2837         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2838
2839         /* Get the UPIU response */
2840         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2841                                 UPIU_RSP_CODE_OFFSET;
2842         return query_res->response;
2843 }
2844
2845 /**
2846  * ufshcd_dev_cmd_completion() - handles device management command responses
2847  * @hba: per adapter instance
2848  * @lrbp: pointer to local reference block
2849  */
2850 static int
2851 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2852 {
2853         int resp;
2854         int err = 0;
2855
2856         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2857         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2858
2859         switch (resp) {
2860         case UPIU_TRANSACTION_NOP_IN:
2861                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2862                         err = -EINVAL;
2863                         dev_err(hba->dev, "%s: unexpected response %x\n",
2864                                         __func__, resp);
2865                 }
2866                 break;
2867         case UPIU_TRANSACTION_QUERY_RSP:
2868                 err = ufshcd_check_query_response(hba, lrbp);
2869                 if (!err)
2870                         err = ufshcd_copy_query_response(hba, lrbp);
2871                 break;
2872         case UPIU_TRANSACTION_REJECT_UPIU:
2873                 /* TODO: handle Reject UPIU Response */
2874                 err = -EPERM;
2875                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2876                                 __func__);
2877                 break;
2878         default:
2879                 err = -EINVAL;
2880                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2881                                 __func__, resp);
2882                 break;
2883         }
2884
2885         return err;
2886 }
2887
2888 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2889                 struct ufshcd_lrb *lrbp, int max_timeout)
2890 {
2891         int err = 0;
2892         unsigned long time_left;
2893         unsigned long flags;
2894
2895         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2896                         msecs_to_jiffies(max_timeout));
2897
2898         spin_lock_irqsave(hba->host->host_lock, flags);
2899         hba->dev_cmd.complete = NULL;
2900         if (likely(time_left)) {
2901                 err = ufshcd_get_tr_ocs(lrbp);
2902                 if (!err)
2903                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2904         }
2905         spin_unlock_irqrestore(hba->host->host_lock, flags);
2906
2907         if (!time_left) {
2908                 err = -ETIMEDOUT;
2909                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2910                         __func__, lrbp->task_tag);
2911                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2912                         /* successfully cleared the command, retry if needed */
2913                         err = -EAGAIN;
2914                 /*
2915                  * in case of an error, after clearing the doorbell,
2916                  * we also need to clear the outstanding_request
2917                  * field in hba
2918                  */
2919                 spin_lock_irqsave(&hba->outstanding_lock, flags);
2920                 __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
2921                 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2922         }
2923
2924         return err;
2925 }
2926
2927 /**
2928  * ufshcd_exec_dev_cmd - API for sending device management requests
2929  * @hba: UFS hba
2930  * @cmd_type: specifies the type (NOP, Query...)
2931  * @timeout: timeout in milliseconds
2932  *
2933  * NOTE: Since there is only one available tag for device management commands,
2934  * the caller is expected to hold the hba->dev_cmd.lock mutex.
2935  */
2936 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2937                 enum dev_cmd_type cmd_type, int timeout)
2938 {
2939         struct request_queue *q = hba->cmd_queue;
2940         DECLARE_COMPLETION_ONSTACK(wait);
2941         struct request *req;
2942         struct ufshcd_lrb *lrbp;
2943         int err;
2944         int tag;
2945
2946         down_read(&hba->clk_scaling_lock);
2947
2948         /*
2949          * Get a free slot; sleep if no slot is available.
2950          * Even though blk_mq_alloc_request() can sleep indefinitely,
2951          * the maximum wait time is bounded by the SCSI request timeout.
2952          */
2953         req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
2954         if (IS_ERR(req)) {
2955                 err = PTR_ERR(req);
2956                 goto out_unlock;
2957         }
2958         tag = req->tag;
2959         WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
2960         /* Set the timeout such that the SCSI error handler is not activated. */
2961         req->timeout = msecs_to_jiffies(2 * timeout);
2962         blk_mq_start_request(req);
2963
2964         lrbp = &hba->lrb[tag];
2965         WARN_ON(lrbp->cmd);
2966         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2967         if (unlikely(err))
2968                 goto out;
2969
2970         hba->dev_cmd.complete = &wait;
2971
2972         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
2973
2974         ufshcd_send_command(hba, tag);
2975         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2976         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
2977                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
2978
2979 out:
2980         blk_mq_free_request(req);
2981 out_unlock:
2982         up_read(&hba->clk_scaling_lock);
2983         return err;
2984 }
2985
2986 /**
2987  * ufshcd_init_query() - init the query response and request parameters
2988  * @hba: per-adapter instance
2989  * @request: address of the request pointer to be initialized
2990  * @response: address of the response pointer to be initialized
2991  * @opcode: operation to perform
2992  * @idn: flag idn to access
2993  * @index: LU number to access
2994  * @selector: query/flag/descriptor further identification
2995  */
2996 static inline void ufshcd_init_query(struct ufs_hba *hba,
2997                 struct ufs_query_req **request, struct ufs_query_res **response,
2998                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2999 {
3000         *request = &hba->dev_cmd.query.request;
3001         *response = &hba->dev_cmd.query.response;
3002         memset(*request, 0, sizeof(struct ufs_query_req));
3003         memset(*response, 0, sizeof(struct ufs_query_res));
3004         (*request)->upiu_req.opcode = opcode;
3005         (*request)->upiu_req.idn = idn;
3006         (*request)->upiu_req.index = index;
3007         (*request)->upiu_req.selector = selector;
3008 }
3009
3010 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3011         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3012 {
3013         int ret;
3014         int retries;
3015
3016         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3017                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3018                 if (ret)
3019                         dev_dbg(hba->dev,
3020                                 "%s: failed with error %d, retries %d\n",
3021                                 __func__, ret, retries);
3022                 else
3023                         break;
3024         }
3025
3026         if (ret)
3027                 dev_err(hba->dev,
3028                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3029                         __func__, opcode, idn, ret, retries);
3030         return ret;
3031 }
3032
3033 /**
3034  * ufshcd_query_flag() - API function for sending flag query requests
3035  * @hba: per-adapter instance
3036  * @opcode: flag query to perform
3037  * @idn: flag idn to access
3038  * @index: flag index to access
3039  * @flag_res: the flag value after the query request completes
3040  *
3041  * Returns 0 for success, non-zero in case of failure
3042  */
3043 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3044                         enum flag_idn idn, u8 index, bool *flag_res)
3045 {
3046         struct ufs_query_req *request = NULL;
3047         struct ufs_query_res *response = NULL;
3048         int err, selector = 0;
3049         int timeout = QUERY_REQ_TIMEOUT;
3050
3051         BUG_ON(!hba);
3052
3053         ufshcd_hold(hba, false);
3054         mutex_lock(&hba->dev_cmd.lock);
3055         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3056                         selector);
3057
3058         switch (opcode) {
3059         case UPIU_QUERY_OPCODE_SET_FLAG:
3060         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3061         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3062                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3063                 break;
3064         case UPIU_QUERY_OPCODE_READ_FLAG:
3065                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3066                 if (!flag_res) {
3067                         /* No dummy reads */
3068                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3069                                         __func__);
3070                         err = -EINVAL;
3071                         goto out_unlock;
3072                 }
3073                 break;
3074         default:
3075                 dev_err(hba->dev,
3076                         "%s: Expected query flag opcode but got = %d\n",
3077                         __func__, opcode);
3078                 err = -EINVAL;
3079                 goto out_unlock;
3080         }
3081
3082         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3083
3084         if (err) {
3085                 dev_err(hba->dev,
3086                         "%s: Sending flag query for idn %d failed, err = %d\n",
3087                         __func__, idn, err);
3088                 goto out_unlock;
3089         }
3090
3091         if (flag_res)
3092                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3093                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3094
3095 out_unlock:
3096         mutex_unlock(&hba->dev_cmd.lock);
3097         ufshcd_release(hba);
3098         return err;
3099 }
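
/*
 * Illustrative sketch, not driver code: a caller could read the
 * fDeviceInit flag through this helper roughly as below; error
 * handling is reduced to a single check for brevity.
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *	if (!err && flag_res)
 *		dev_dbg(hba->dev, "fDeviceInit still set\n");
 */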
3100
3101 /**
3102  * ufshcd_query_attr - API function for sending attribute requests
3103  * @hba: per-adapter instance
3104  * @opcode: attribute opcode
3105  * @idn: attribute idn to access
3106  * @index: index field
3107  * @selector: selector field
3108  * @attr_val: the attribute value after the query request completes
3109  *
3110  * Returns 0 for success, non-zero in case of failure
3111  */
3112 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3113                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3114 {
3115         struct ufs_query_req *request = NULL;
3116         struct ufs_query_res *response = NULL;
3117         int err;
3118
3119         BUG_ON(!hba);
3120
3121         if (!attr_val) {
3122                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3123                                 __func__, opcode);
3124                 return -EINVAL;
3125         }
3126
3127         ufshcd_hold(hba, false);
3128
3129         mutex_lock(&hba->dev_cmd.lock);
3130         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3131                         selector);
3132
3133         switch (opcode) {
3134         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3135                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3136                 request->upiu_req.value = cpu_to_be32(*attr_val);
3137                 break;
3138         case UPIU_QUERY_OPCODE_READ_ATTR:
3139                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3140                 break;
3141         default:
3142                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3143                                 __func__, opcode);
3144                 err = -EINVAL;
3145                 goto out_unlock;
3146         }
3147
3148         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3149
3150         if (err) {
3151                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3152                                 __func__, opcode, idn, index, err);
3153                 goto out_unlock;
3154         }
3155
3156         *attr_val = be32_to_cpu(response->upiu_res.value);
3157
3158 out_unlock:
3159         mutex_unlock(&hba->dev_cmd.lock);
3160         ufshcd_release(hba);
3161         return err;
3162 }
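
/*
 * Illustrative sketch, not driver code: reading a device attribute,
 * here bBackgroundOpStatus. The helper serializes on hba->dev_cmd.lock
 * internally, so no extra locking is shown.
 *
 *	u32 bkops = 0;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, &bkops);
 *	if (!err)
 *		dev_dbg(hba->dev, "bkops status: %u\n", bkops);
 */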
3163
3164 /**
3165  * ufshcd_query_attr_retry() - API function for sending query
3166  * attribute with retries
3167  * @hba: per-adapter instance
3168  * @opcode: attribute opcode
3169  * @idn: attribute idn to access
3170  * @index: index field
3171  * @selector: selector field
3172  * @attr_val: the attribute value after the query request
3173  * completes
3174  *
3175  * Returns 0 for success, non-zero in case of failure
3176  */
3177 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3178         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3179         u32 *attr_val)
3180 {
3181         int ret = 0;
3182         u32 retries;
3183
3184         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3185                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3186                                                 selector, attr_val);
3187                 if (ret)
3188                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3189                                 __func__, ret, retries);
3190                 else
3191                         break;
3192         }
3193
3194         if (ret)
3195                 dev_err(hba->dev,
3196                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3197                         __func__, idn, ret, QUERY_REQ_RETRIES);
3198         return ret;
3199 }
3200
3201 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3202                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3203                         u8 selector, u8 *desc_buf, int *buf_len)
3204 {
3205         struct ufs_query_req *request = NULL;
3206         struct ufs_query_res *response = NULL;
3207         int err;
3208
3209         BUG_ON(!hba);
3210
3211         if (!desc_buf) {
3212                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3213                                 __func__, opcode);
3214                 return -EINVAL;
3215         }
3216
3217         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3218                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3219                                 __func__, *buf_len);
3220                 return -EINVAL;
3221         }
3222
3223         ufshcd_hold(hba, false);
3224
3225         mutex_lock(&hba->dev_cmd.lock);
3226         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3227                         selector);
3228         hba->dev_cmd.query.descriptor = desc_buf;
3229         request->upiu_req.length = cpu_to_be16(*buf_len);
3230
3231         switch (opcode) {
3232         case UPIU_QUERY_OPCODE_WRITE_DESC:
3233                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3234                 break;
3235         case UPIU_QUERY_OPCODE_READ_DESC:
3236                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3237                 break;
3238         default:
3239                 dev_err(hba->dev,
3240                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3241                                 __func__, opcode);
3242                 err = -EINVAL;
3243                 goto out_unlock;
3244         }
3245
3246         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3247
3248         if (err) {
3249                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3250                                 __func__, opcode, idn, index, err);
3251                 goto out_unlock;
3252         }
3253
3254         *buf_len = be16_to_cpu(response->upiu_res.length);
3255
3256 out_unlock:
3257         hba->dev_cmd.query.descriptor = NULL;
3258         mutex_unlock(&hba->dev_cmd.lock);
3259         ufshcd_release(hba);
3260         return err;
3261 }
3262
3263 /**
3264  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3265  * @hba: per-adapter instance
3266  * @opcode: attribute opcode
3267  * @idn: attribute idn to access
3268  * @index: index field
3269  * @selector: selector field
3270  * @desc_buf: the buffer that contains the descriptor
3271  * @buf_len: length parameter passed to the device
3272  *
3273  * Returns 0 for success, non-zero in case of failure.
3274  * The buf_len parameter will contain, on return, the length parameter
3275  * received on the response.
3276  */
3277 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3278                                   enum query_opcode opcode,
3279                                   enum desc_idn idn, u8 index,
3280                                   u8 selector,
3281                                   u8 *desc_buf, int *buf_len)
3282 {
3283         int err;
3284         int retries;
3285
3286         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3287                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3288                                                 selector, desc_buf, buf_len);
3289                 if (!err || err == -EINVAL)
3290                         break;
3291         }
3292
3293         return err;
3294 }
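
/*
 * Illustrative sketch, not driver code: reading the device descriptor
 * with the retrying helper. On return, buf_len holds the length reported
 * in the response, which may be smaller than the size passed in.
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 */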
3295
3296 /**
3297  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3298  * @hba: Pointer to adapter instance
3299  * @desc_id: descriptor idn value
3300  * @desc_len: mapped desc length (out)
3301  */
3302 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3303                                   int *desc_len)
3304 {
3305         if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3306             desc_id == QUERY_DESC_IDN_RFU_1)
3307                 *desc_len = 0;
3308         else
3309                 *desc_len = hba->desc_size[desc_id];
3310 }
3311 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3312
3313 static void ufshcd_update_desc_length(struct ufs_hba *hba,
3314                                       enum desc_idn desc_id, int desc_index,
3315                                       unsigned char desc_len)
3316 {
3317         if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3318             desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3319                 /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3320                  * than the RPMB unit descriptor; however, both share the same
3321                  * desc_idn. To cover both unit descriptors with one length, we
3322                  * choose the normal unit descriptor length based on desc_index.
3323                  */
3324                 hba->desc_size[desc_id] = desc_len;
3325 }
3326
3327 /**
3328  * ufshcd_read_desc_param - read the specified descriptor parameter
3329  * @hba: Pointer to adapter instance
3330  * @desc_id: descriptor idn value
3331  * @desc_index: descriptor index
3332  * @param_offset: offset of the parameter to read
3333  * @param_read_buf: pointer to buffer where parameter would be read
3334  * @param_size: sizeof(param_read_buf)
3335  *
3336  * Return 0 in case of success, non-zero otherwise
3337  */
3338 int ufshcd_read_desc_param(struct ufs_hba *hba,
3339                            enum desc_idn desc_id,
3340                            int desc_index,
3341                            u8 param_offset,
3342                            u8 *param_read_buf,
3343                            u8 param_size)
3344 {
3345         int ret;
3346         u8 *desc_buf;
3347         int buff_len;
3348         bool is_kmalloc = true;
3349
3350         /* Safety check */
3351         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3352                 return -EINVAL;
3353
3354         /* Get the length of descriptor */
3355         ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3356         if (!buff_len) {
3357                 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3358                 return -EINVAL;
3359         }
3360
3361         if (param_offset >= buff_len) {
3362                 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3363                         __func__, param_offset, desc_id, buff_len);
3364                 return -EINVAL;
3365         }
3366
3367         /* Check whether we need temp memory */
3368         if (param_offset != 0 || param_size < buff_len) {
3369                 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3370                 if (!desc_buf)
3371                         return -ENOMEM;
3372         } else {
3373                 desc_buf = param_read_buf;
3374                 is_kmalloc = false;
3375         }
3376
3377         /* Request for full descriptor */
3378         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3379                                         desc_id, desc_index, 0,
3380                                         desc_buf, &buff_len);
3381
3382         if (ret) {
3383                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3384                         __func__, desc_id, desc_index, param_offset, ret);
3385                 goto out;
3386         }
3387
3388         /* Sanity check */
3389         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3390                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3391                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3392                 ret = -EINVAL;
3393                 goto out;
3394         }
3395
3396         /* Update descriptor length */
3397         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3398         ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3399
3400         if (is_kmalloc) {
3401                 /* Make sure we don't copy more data than available */
3402                 if (param_offset >= buff_len)
3403                         ret = -EINVAL;
3404                 else
3405                         memcpy(param_read_buf, &desc_buf[param_offset],
3406                                min_t(u32, param_size, buff_len - param_offset));
3407         }
3408 out:
3409         if (is_kmalloc)
3410                 kfree(desc_buf);
3411         return ret;
3412 }
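
/*
 * Illustrative sketch, not driver code: fetching a single field, here the
 * device descriptor's wSpecVersion, without reading the whole descriptor
 * at the call site; the helper bounce-buffers internally when needed.
 *
 *	u8 spec_ver[2];
 *	int err;
 *
 *	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *				     DEVICE_DESC_PARAM_SPEC_VER,
 *				     spec_ver, sizeof(spec_ver));
 */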
3413
3414 /**
3415  * struct uc_string_id - unicode string
3416  *
3417  * @len: size of this descriptor, including the header
3418  * @type: descriptor type
3419  * @uc: unicode string character
3420  */
3421 struct uc_string_id {
3422         u8 len;
3423         u8 type;
3424         wchar_t uc[];
3425 } __packed;
3426
3427 /* replace non-printable or non-ASCII characters with spaces */
3428 static inline char ufshcd_remove_non_printable(u8 ch)
3429 {
3430         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3431 }
3432
3433 /**
3434  * ufshcd_read_string_desc - read string descriptor
3435  * @hba: pointer to adapter instance
3436  * @desc_index: descriptor index
3437  * @buf: pointer to buffer where descriptor would be read,
3438  *       the caller should free the memory.
3439  * @ascii: if true, convert from UTF-16 to an ASCII,
3440  *         null-terminated string.
3441  *
3442  * Return:
3443  * *      string size on success.
3444  * *      -ENOMEM: on allocation failure
3445  * *      -EINVAL: on a wrong parameter
3446  */
3447 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3448                             u8 **buf, bool ascii)
3449 {
3450         struct uc_string_id *uc_str;
3451         u8 *str;
3452         int ret;
3453
3454         if (!buf)
3455                 return -EINVAL;
3456
3457         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3458         if (!uc_str)
3459                 return -ENOMEM;
3460
3461         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3462                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3463         if (ret < 0) {
3464                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3465                         QUERY_REQ_RETRIES, ret);
3466                 str = NULL;
3467                 goto out;
3468         }
3469
3470         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3471                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3472                 str = NULL;
3473                 ret = 0;
3474                 goto out;
3475         }
3476
3477         if (ascii) {
3478                 ssize_t ascii_len;
3479                 int i;
3480                 /* drop the header and divide by 2 to go from UTF-16 units to chars, +1 for NUL */
3481                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3482                 str = kzalloc(ascii_len, GFP_KERNEL);
3483                 if (!str) {
3484                         ret = -ENOMEM;
3485                         goto out;
3486                 }
3487
3488                 /*
3489                  * the descriptor contains the string in UTF-16 format;
3490                  * we need to convert it to UTF-8 so it can be displayed
3491                  */
3492                 ret = utf16s_to_utf8s(uc_str->uc,
3493                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3494                                       UTF16_BIG_ENDIAN, str, ascii_len);
3495
3496                 /* replace non-printable or non-ASCII characters with spaces */
3497                 for (i = 0; i < ret; i++)
3498                         str[i] = ufshcd_remove_non_printable(str[i]);
3499
3500                 str[ret++] = '\0';
3501
3502         } else {
3503                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3504                 if (!str) {
3505                         ret = -ENOMEM;
3506                         goto out;
3507                 }
3508                 ret = uc_str->len;
3509         }
3510 out:
3511         *buf = str;
3512         kfree(uc_str);
3513         return ret;
3514 }
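
/*
 * Illustrative sketch, not driver code: reading the product name string.
 * model_index is assumed to have been read from the device descriptor
 * (DEVICE_DESC_PARAM_PRDCT_NAME); the caller owns and must free the
 * returned buffer.
 *
 *	u8 *model = NULL;
 *	int ret;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		dev_dbg(hba->dev, "model: %s\n", model);
 *	kfree(model);
 */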
3515
3516 /**
3517  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3518  * @hba: Pointer to adapter instance
3519  * @lun: lun id
3520  * @param_offset: offset of the parameter to read
3521  * @param_read_buf: pointer to buffer where parameter would be read
3522  * @param_size: sizeof(param_read_buf)
3523  *
3524  * Return 0 in case of success, non-zero otherwise
3525  */
3526 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3527                                               int lun,
3528                                               enum unit_desc_param param_offset,
3529                                               u8 *param_read_buf,
3530                                               u32 param_size)
3531 {
3532         /*
3533          * Unit descriptors are only available for general purpose LUs (LUN id
3534          * from 0 to 7) and RPMB Well known LU.
3535          */
3536         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3537                 return -EOPNOTSUPP;
3538
3539         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3540                                       param_offset, param_read_buf, param_size);
3541 }
3542
3543 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3544 {
3545         int err = 0;
3546         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3547
3548         if (hba->dev_info.wspecversion >= 0x300) {
3549                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3550                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3551                                 &gating_wait);
3552                 if (err)
3553                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3554                                          err, gating_wait);
3555
3556                 if (gating_wait == 0) {
3557                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3558                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3559                                          gating_wait);
3560                 }
3561
3562                 hba->dev_info.clk_gating_wait_us = gating_wait;
3563         }
3564
3565         return err;
3566 }
3567
3568 /**
3569  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3570  * @hba: per adapter instance
3571  *
3572  * 1. Allocate DMA memory for Command Descriptor array
3573  *      Each command descriptor consists of a Command UPIU, a Response UPIU and a PRDT
3574  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3575  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3576  *      (UTMRDL)
3577  * 4. Allocate memory for local reference block (lrb).
3578  *
3579  * Returns 0 for success, non-zero in case of failure
3580  */
3581 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3582 {
3583         size_t utmrdl_size, utrdl_size, ucdl_size;
3584
3585         /* Allocate memory for UTP command descriptors */
3586         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3587         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3588                                                   ucdl_size,
3589                                                   &hba->ucdl_dma_addr,
3590                                                   GFP_KERNEL);
3591
3592         /*
3593          * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3594          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3595          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3596          * be aligned to 128 bytes as well.
3597          */
3598         if (!hba->ucdl_base_addr ||
3599             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3600                 dev_err(hba->dev,
3601                         "Command Descriptor Memory allocation failed\n");
3602                 goto out;
3603         }
3604
3605         /*
3606          * Allocate memory for UTP Transfer descriptors
3607          * UFSHCI requires 1024 byte alignment of UTRD
3608          */
3609         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3610         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3611                                                    utrdl_size,
3612                                                    &hba->utrdl_dma_addr,
3613                                                    GFP_KERNEL);
3614         if (!hba->utrdl_base_addr ||
3615             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3616                 dev_err(hba->dev,
3617                         "Transfer Descriptor Memory allocation failed\n");
3618                 goto out;
3619         }
3620
3621         /*
3622          * Allocate memory for UTP Task Management descriptors
3623          * UFSHCI requires 1024 byte alignment of UTMRD
3624          */
3625         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3626         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3627                                                     utmrdl_size,
3628                                                     &hba->utmrdl_dma_addr,
3629                                                     GFP_KERNEL);
3630         if (!hba->utmrdl_base_addr ||
3631             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3632                 dev_err(hba->dev,
3633                 "Task Management Descriptor Memory allocation failed\n");
3634                 goto out;
3635         }
3636
3637         /* Allocate memory for local reference block */
3638         hba->lrb = devm_kcalloc(hba->dev,
3639                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3640                                 GFP_KERNEL);
3641         if (!hba->lrb) {
3642                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3643                 goto out;
3644         }
3645         return 0;
3646 out:
3647         return -ENOMEM;
3648 }
3649
3650 /**
3651  * ufshcd_host_memory_configure - configure local reference block with
3652  *                              memory offsets
3653  * @hba: per adapter instance
3654  *
3655  * Configure Host memory space
3656  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3657  * address.
3658  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3659  * and PRDT offset.
3660  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3661  * into local reference block.
3662  */
3663 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3664 {
3665         struct utp_transfer_req_desc *utrdlp;
3666         dma_addr_t cmd_desc_dma_addr;
3667         dma_addr_t cmd_desc_element_addr;
3668         u16 response_offset;
3669         u16 prdt_offset;
3670         int cmd_desc_size;
3671         int i;
3672
3673         utrdlp = hba->utrdl_base_addr;
3674
3675         response_offset =
3676                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3677         prdt_offset =
3678                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3679
3680         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3681         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3682
3683         for (i = 0; i < hba->nutrs; i++) {
3684                 /* Configure UTRD with command descriptor base address */
3685                 cmd_desc_element_addr =
3686                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3687                 utrdlp[i].command_desc_base_addr_lo =
3688                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3689                 utrdlp[i].command_desc_base_addr_hi =
3690                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3691
3692                 /* Response UPIU and PRDT offsets should be in double words */
3693                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3694                         utrdlp[i].response_upiu_offset =
3695                                 cpu_to_le16(response_offset);
3696                         utrdlp[i].prd_table_offset =
3697                                 cpu_to_le16(prdt_offset);
3698                         utrdlp[i].response_upiu_length =
3699                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3700                 } else {
3701                         utrdlp[i].response_upiu_offset =
3702                                 cpu_to_le16(response_offset >> 2);
3703                         utrdlp[i].prd_table_offset =
3704                                 cpu_to_le16(prdt_offset >> 2);
3705                         utrdlp[i].response_upiu_length =
3706                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3707                 }
3708
3709                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3710         }
3711 }
3712
3713 /**
3714  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3715  * @hba: per adapter instance
3716  *
3717  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3718  * in order to initialize the Unipro link startup procedure.
3719  * Once the Unipro links are up, the device connected to the controller
3720  * is detected.
3721  *
3722  * Returns 0 on success, non-zero value on failure
3723  */
3724 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3725 {
3726         struct uic_command uic_cmd = {0};
3727         int ret;
3728
3729         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3730
3731         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3732         if (ret)
3733                 dev_dbg(hba->dev,
3734                         "dme-link-startup: error code %d\n", ret);
3735         return ret;
3736 }
3737 /**
3738  * ufshcd_dme_reset - UIC command for DME_RESET
3739  * @hba: per adapter instance
3740  *
3741  * DME_RESET command is issued in order to reset UniPro stack.
3742  * This function now deals with cold reset.
3743  *
3744  * Returns 0 on success, non-zero value on failure
3745  */
3746 static int ufshcd_dme_reset(struct ufs_hba *hba)
3747 {
3748         struct uic_command uic_cmd = {0};
3749         int ret;
3750
3751         uic_cmd.command = UIC_CMD_DME_RESET;
3752
3753         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3754         if (ret)
3755                 dev_err(hba->dev,
3756                         "dme-reset: error code %d\n", ret);
3757
3758         return ret;
3759 }
3760
3761 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3762                                int agreed_gear,
3763                                int adapt_val)
3764 {
3765         int ret;
3766
3767         if (agreed_gear != UFS_HS_G4)
3768                 adapt_val = PA_NO_ADAPT;
3769
3770         ret = ufshcd_dme_set(hba,
3771                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3772                              adapt_val);
3773         return ret;
3774 }
3775 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3776
3777 /**
3778  * ufshcd_dme_enable - UIC command for DME_ENABLE
3779  * @hba: per adapter instance
3780  *
3781  * DME_ENABLE command is issued in order to enable UniPro stack.
3782  *
3783  * Returns 0 on success, non-zero value on failure
3784  */
3785 static int ufshcd_dme_enable(struct ufs_hba *hba)
3786 {
3787         struct uic_command uic_cmd = {0};
3788         int ret;
3789
3790         uic_cmd.command = UIC_CMD_DME_ENABLE;
3791
3792         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3793         if (ret)
3794                 dev_err(hba->dev,
3795                         "dme-enable: error code %d\n", ret);
3796
3797         return ret;
3798 }
3799
3800 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3801 {
3802         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3803         unsigned long min_sleep_time_us;
3804
3805         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3806                 return;
3807
3808         /*
3809          * last_dme_cmd_tstamp will be 0 only for the first call to
3810          * this function
3811          */
3812         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3813                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3814         } else {
3815                 unsigned long delta =
3816                         (unsigned long) ktime_to_us(
3817                                 ktime_sub(ktime_get(),
3818                                 hba->last_dme_cmd_tstamp));
3819
3820                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3821                         min_sleep_time_us =
3822                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3823                 else
3824                         return; /* no more delay required */
3825         }
3826
3827         /* allow sleep for extra 50us if needed */
3828         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3829 }
3830
3831 /**
3832  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3833  * @hba: per adapter instance
3834  * @attr_sel: uic command argument1
3835  * @attr_set: attribute set type as uic command argument2
3836  * @mib_val: setting value as uic command argument3
3837  * @peer: indicate whether peer or local
3838  *
3839  * Returns 0 on success, non-zero value on failure
3840  */
3841 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3842                         u8 attr_set, u32 mib_val, u8 peer)
3843 {
3844         struct uic_command uic_cmd = {0};
3845         static const char *const action[] = {
3846                 "dme-set",
3847                 "dme-peer-set"
3848         };
3849         const char *set = action[!!peer];
3850         int ret;
3851         int retries = UFS_UIC_COMMAND_RETRIES;
3852
3853         uic_cmd.command = peer ?
3854                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3855         uic_cmd.argument1 = attr_sel;
3856         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3857         uic_cmd.argument3 = mib_val;
3858
3859         do {
3860                 /* for peer attributes we retry upon failure */
3861                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3862                 if (ret)
3863                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3864                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3865         } while (ret && peer && --retries);
3866
3867         if (ret)
3868                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3869                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3870                         UFS_UIC_COMMAND_RETRIES - retries);
3871
3872         return ret;
3873 }
3874 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
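
/*
 * Illustrative sketch, not driver code: most callers use the
 * ufshcd_dme_set() wrapper from ufshcd.h rather than calling
 * ufshcd_dme_set_attr() directly, e.g. to program a PA-layer attribute:
 *
 *	int ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
 *				 DL_FC0ProtectionTimeOutVal_Default);
 */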
3875
3876 /**
3877  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3878  * @hba: per adapter instance
3879  * @attr_sel: uic command argument1
3880  * @mib_val: the value of the attribute as returned by the UIC command
3881  * @peer: indicate whether peer or local
3882  *
3883  * Returns 0 on success, non-zero value on failure
3884  */
3885 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3886                         u32 *mib_val, u8 peer)
3887 {
3888         struct uic_command uic_cmd = {0};
3889         static const char *const action[] = {
3890                 "dme-get",
3891                 "dme-peer-get"
3892         };
3893         const char *get = action[!!peer];
3894         int ret;
3895         int retries = UFS_UIC_COMMAND_RETRIES;
3896         struct ufs_pa_layer_attr orig_pwr_info;
3897         struct ufs_pa_layer_attr temp_pwr_info;
3898         bool pwr_mode_change = false;
3899
3900         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3901                 orig_pwr_info = hba->pwr_info;
3902                 temp_pwr_info = orig_pwr_info;
3903
3904                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3905                     orig_pwr_info.pwr_rx == FAST_MODE) {
3906                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3907                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3908                         pwr_mode_change = true;
3909                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3910                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3911                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3912                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3913                         pwr_mode_change = true;
3914                 }
3915                 if (pwr_mode_change) {
3916                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3917                         if (ret)
3918                                 goto out;
3919                 }
3920         }
3921
3922         uic_cmd.command = peer ?
3923                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3924         uic_cmd.argument1 = attr_sel;
3925
3926         do {
3927                 /* for peer attributes we retry upon failure */
3928                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3929                 if (ret)
3930                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3931                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3932         } while (ret && peer && --retries);
3933
3934         if (ret)
3935                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3936                         get, UIC_GET_ATTR_ID(attr_sel),
3937                         UFS_UIC_COMMAND_RETRIES - retries);
3938
3939         if (mib_val && !ret)
3940                 *mib_val = uic_cmd.argument3;
3941
3942         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3943             && pwr_mode_change)
3944                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3945 out:
3946         return ret;
3947 }
3948 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
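
/*
 * Illustrative sketch, not driver code: the ufshcd_dme_get() and
 * ufshcd_dme_peer_get() wrappers from ufshcd.h read local and peer
 * attributes respectively; note that only peer reads are retried above.
 *
 *	u32 rx_lanes = 0;
 *	int ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
 *				 &rx_lanes);
 */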
3949
3950 /**
3951  * ufshcd_uic_pwr_ctrl - executes a UIC command (which affects the link power
3952  * state) and waits for it to take effect.
3953  *
3954  * @hba: per adapter instance
3955  * @cmd: UIC command to execute
3956  *
3957  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3958  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3959  * and device UniPro link, and hence their final completion is indicated by
3960  * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3961  * addition to normal UIC command completion Status (UCCS). This function only
3962  * returns after the relevant status bits indicate the completion.
3963  *
3964  * Returns 0 on success, non-zero value on failure
3965  */
3966 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3967 {
3968         DECLARE_COMPLETION_ONSTACK(uic_async_done);
3969         unsigned long flags;
3970         u8 status;
3971         int ret;
3972         bool reenable_intr = false;
3973
3974         mutex_lock(&hba->uic_cmd_mutex);
3975         ufshcd_add_delay_before_dme_cmd(hba);
3976
3977         spin_lock_irqsave(hba->host->host_lock, flags);
3978         if (ufshcd_is_link_broken(hba)) {
3979                 ret = -ENOLINK;
3980                 goto out_unlock;
3981         }
3982         hba->uic_async_done = &uic_async_done;
3983         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3984                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3985                 /*
3986                  * Make sure UIC command completion interrupt is disabled before
3987                  * issuing UIC command.
3988                  */
3989                 wmb();
3990                 reenable_intr = true;
3991         }
3992         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3993         spin_unlock_irqrestore(hba->host->host_lock, flags);
3994         if (ret) {
3995                 dev_err(hba->dev,
3996                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3997                         cmd->command, cmd->argument3, ret);
3998                 goto out;
3999         }
4000
4001         if (!wait_for_completion_timeout(hba->uic_async_done,
4002                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4003                 dev_err(hba->dev,
4004                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4005                         cmd->command, cmd->argument3);
4006
4007                 if (!cmd->cmd_active) {
4008                         dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4009                                 __func__);
4010                         goto check_upmcrs;
4011                 }
4012
4013                 ret = -ETIMEDOUT;
4014                 goto out;
4015         }
4016
4017 check_upmcrs:
4018         status = ufshcd_get_upmcrs(hba);
4019         if (status != PWR_LOCAL) {
4020                 dev_err(hba->dev,
4021                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4022                         cmd->command, status);
4023                 ret = (status != PWR_OK) ? status : -1;
4024         }
4025 out:
4026         if (ret) {
4027                 ufshcd_print_host_state(hba);
4028                 ufshcd_print_pwr_info(hba);
4029                 ufshcd_print_evt_hist(hba);
4030         }
4031
4032         spin_lock_irqsave(hba->host->host_lock, flags);
4033         hba->active_uic_cmd = NULL;
4034         hba->uic_async_done = NULL;
4035         if (reenable_intr)
4036                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4037         if (ret) {
4038                 ufshcd_set_link_broken(hba);
4039                 ufshcd_schedule_eh_work(hba);
4040         }
4041 out_unlock:
4042         spin_unlock_irqrestore(hba->host->host_lock, flags);
4043         mutex_unlock(&hba->uic_cmd_mutex);
4044
4045         return ret;
4046 }
4047
4048 /**
4049  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4050  *                              using DME_SET primitives.
4051  * @hba: per adapter instance
4052  * @mode: power mode value
4053  *
4054  * Returns 0 on success, non-zero value on failure
4055  */
4056 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4057 {
4058         struct uic_command uic_cmd = {0};
4059         int ret;
4060
4061         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4062                 ret = ufshcd_dme_set(hba,
4063                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4064                 if (ret) {
4065                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4066                                                 __func__, ret);
4067                         goto out;
4068                 }
4069         }
4070
4071         uic_cmd.command = UIC_CMD_DME_SET;
4072         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4073         uic_cmd.argument3 = mode;
4074         ufshcd_hold(hba, false);
4075         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4076         ufshcd_release(hba);
4077
4078 out:
4079         return ret;
4080 }
4081
4082 int ufshcd_link_recovery(struct ufs_hba *hba)
4083 {
4084         int ret;
4085         unsigned long flags;
4086
4087         spin_lock_irqsave(hba->host->host_lock, flags);
4088         hba->ufshcd_state = UFSHCD_STATE_RESET;
4089         ufshcd_set_eh_in_progress(hba);
4090         spin_unlock_irqrestore(hba->host->host_lock, flags);
4091
4092         /* Reset the attached device */
4093         ufshcd_device_reset(hba);
4094
4095         ret = ufshcd_host_reset_and_restore(hba);
4096
4097         spin_lock_irqsave(hba->host->host_lock, flags);
4098         if (ret)
4099                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4100         ufshcd_clear_eh_in_progress(hba);
4101         spin_unlock_irqrestore(hba->host->host_lock, flags);
4102
4103         if (ret)
4104                 dev_err(hba->dev, "%s: link recovery failed, err %d",
4105                         __func__, ret);
4106
4107         return ret;
4108 }
4109 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4110
4111 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4112 {
4113         int ret;
4114         struct uic_command uic_cmd = {0};
4115         ktime_t start = ktime_get();
4116
4117         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4118
4119         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4120         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4121         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4122                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4123
4124         if (ret)
4125                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4126                         __func__, ret);
4127         else
4128                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4129                                                                 POST_CHANGE);
4130
4131         return ret;
4132 }
4133 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4134
4135 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4136 {
4137         struct uic_command uic_cmd = {0};
4138         int ret;
4139         ktime_t start = ktime_get();
4140
4141         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4142
4143         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4144         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4145         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4146                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4147
4148         if (ret) {
4149                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4150                         __func__, ret);
4151         } else {
4152                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4153                                                                 POST_CHANGE);
4154                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4155                 hba->ufs_stats.hibern8_exit_cnt++;
4156         }
4157
4158         return ret;
4159 }
4160 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4161
4162 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4163 {
4164         unsigned long flags;
4165         bool update = false;
4166
4167         if (!ufshcd_is_auto_hibern8_supported(hba))
4168                 return;
4169
4170         spin_lock_irqsave(hba->host->host_lock, flags);
4171         if (hba->ahit != ahit) {
4172                 hba->ahit = ahit;
4173                 update = true;
4174         }
4175         spin_unlock_irqrestore(hba->host->host_lock, flags);
4176
4177         if (update &&
4178             !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
4179                 ufshcd_rpm_get_sync(hba);
4180                 ufshcd_hold(hba, false);
4181                 ufshcd_auto_hibern8_enable(hba);
4182                 ufshcd_release(hba);
4183                 ufshcd_rpm_put_sync(hba);
4184         }
4185 }
4186 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
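
/*
 * Illustrative sketch, not driver code: an AHIT value combines the
 * UFSHCI_AHIBERN8_TIMER_MASK and UFSHCI_AHIBERN8_SCALE_MASK fields from
 * ufshci.h, e.g. a 1 ms idle timer (10 units at the 100 us scale):
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 2);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */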
4187
4188 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4189 {
4190         unsigned long flags;
4191
4192         if (!ufshcd_is_auto_hibern8_supported(hba))
4193                 return;
4194
4195         spin_lock_irqsave(hba->host->host_lock, flags);
4196         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4197         spin_unlock_irqrestore(hba->host->host_lock, flags);
4198 }
4199
4200 /**
4201  * ufshcd_init_pwr_info - setting the POR (power on reset)
4202  * values in hba power info
4203  * @hba: per-adapter instance
4204  */
4205 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4206 {
4207         hba->pwr_info.gear_rx = UFS_PWM_G1;
4208         hba->pwr_info.gear_tx = UFS_PWM_G1;
4209         hba->pwr_info.lane_rx = 1;
4210         hba->pwr_info.lane_tx = 1;
4211         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4212         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4213         hba->pwr_info.hs_rate = 0;
4214 }
4215
4216 /**
4217  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4218  * @hba: per-adapter instance
4219  */
4220 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4221 {
4222         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4223
4224         if (hba->max_pwr_info.is_valid)
4225                 return 0;
4226
4227         pwr_info->pwr_tx = FAST_MODE;
4228         pwr_info->pwr_rx = FAST_MODE;
4229         pwr_info->hs_rate = PA_HS_MODE_B;
4230
4231         /* Get the connected lane count */
4232         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4233                         &pwr_info->lane_rx);
4234         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4235                         &pwr_info->lane_tx);
4236
4237         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4238                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4239                                 __func__,
4240                                 pwr_info->lane_rx,
4241                                 pwr_info->lane_tx);
4242                 return -EINVAL;
4243         }
4244
4245         /*
4246          * First, get the maximum gears of HS speed.
4247          * If a zero value, it means there is no HSGEAR capability.
4248          * Then, get the maximum gears of PWM speed.
4249          */
4250         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4251         if (!pwr_info->gear_rx) {
4252                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4253                                 &pwr_info->gear_rx);
4254                 if (!pwr_info->gear_rx) {
4255                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4256                                 __func__, pwr_info->gear_rx);
4257                         return -EINVAL;
4258                 }
4259                 pwr_info->pwr_rx = SLOW_MODE;
4260         }
4261
4262         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4263                         &pwr_info->gear_tx);
4264         if (!pwr_info->gear_tx) {
4265                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4266                                 &pwr_info->gear_tx);
4267                 if (!pwr_info->gear_tx) {
4268                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4269                                 __func__, pwr_info->gear_tx);
4270                         return -EINVAL;
4271                 }
4272                 pwr_info->pwr_tx = SLOW_MODE;
4273         }
4274
4275         hba->max_pwr_info.is_valid = true;
4276         return 0;
4277 }
4278
4279 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4280                              struct ufs_pa_layer_attr *pwr_mode)
4281 {
4282         int ret;
4283
4284         /* if already configured to the requested pwr_mode */
4285         if (!hba->force_pmc &&
4286             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4287             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4288             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4289             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4290             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4291             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4292             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4293                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4294                 return 0;
4295         }
4296
4297         /*
4298          * Configure the attributes required for the power mode change:
4299          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4300          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4301          * - PA_HSSERIES
4302          */
4303         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4304         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4305                         pwr_mode->lane_rx);
4306         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4307                         pwr_mode->pwr_rx == FAST_MODE)
4308                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4309         else
4310                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4311
4312         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4313         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4314                         pwr_mode->lane_tx);
4315         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4316                         pwr_mode->pwr_tx == FAST_MODE)
4317                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4318         else
4319                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4320
4321         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4322             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4323             pwr_mode->pwr_rx == FAST_MODE ||
4324             pwr_mode->pwr_tx == FAST_MODE)
4325                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4326                                                 pwr_mode->hs_rate);
4327
4328         if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4329                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4330                                 DL_FC0ProtectionTimeOutVal_Default);
4331                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4332                                 DL_TC0ReplayTimeOutVal_Default);
4333                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4334                                 DL_AFC0ReqTimeOutVal_Default);
4335                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4336                                 DL_FC1ProtectionTimeOutVal_Default);
4337                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4338                                 DL_TC1ReplayTimeOutVal_Default);
4339                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4340                                 DL_AFC1ReqTimeOutVal_Default);
4341
4342                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4343                                 DL_FC0ProtectionTimeOutVal_Default);
4344                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4345                                 DL_TC0ReplayTimeOutVal_Default);
4346                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4347                                 DL_AFC0ReqTimeOutVal_Default);
4348         }
4349
4350         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4351                         | pwr_mode->pwr_tx);
4352
4353         if (ret) {
4354                 dev_err(hba->dev,
4355                         "%s: power mode change failed %d\n", __func__, ret);
4356         } else {
4357                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4358                                                                 pwr_mode);
4359
4360                 memcpy(&hba->pwr_info, pwr_mode,
4361                         sizeof(struct ufs_pa_layer_attr));
4362         }
4363
4364         return ret;
4365 }
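
/*
 * Note on the mode byte passed to ufshcd_uic_change_pwr_mode() above: the
 * RX power mode goes in the upper nibble and the TX power mode in the
 * lower nibble. Illustrative sketch (a correctness aid, not driver code):
 *
 *	u8 mode = FAST_MODE << 4 | SLOW_MODE;	/* RX fast, TX slow */
 */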
4366
4367 /**
4368  * ufshcd_config_pwr_mode - configure a new power mode
4369  * @hba: per-adapter instance
4370  * @desired_pwr_mode: desired power configuration
4371  */
4372 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4373                 struct ufs_pa_layer_attr *desired_pwr_mode)
4374 {
4375         struct ufs_pa_layer_attr final_params = { 0 };
4376         int ret;
4377
4378         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4379                                         desired_pwr_mode, &final_params);
4380
4381         if (ret)
4382                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4383
4384         ret = ufshcd_change_power_mode(hba, &final_params);
4385
4386         return ret;
4387 }
4388 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
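
/*
 * Illustrative (hypothetical) use, e.g. when scaling up to the maximum
 * mode negotiated at link startup; the pwr_change_notify() vendor hook may
 * still adjust the final parameters:
 *
 *	// assuming hba->max_pwr_info was populated by ufshcd_get_max_pwr_mode()
 *	ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */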
4389
4390 /**
4391  * ufshcd_complete_dev_init() - checks device readiness
4392  * @hba: per-adapter instance
4393  *
4394  * Set fDeviceInit flag and poll until device toggles it.
4395  */
4396 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4397 {
4398         int err;
4399         bool flag_res = true;
4400         ktime_t timeout;
4401
4402         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4403                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4404         if (err) {
4405                 dev_err(hba->dev,
4406                         "%s setting fDeviceInit flag failed with error %d\n",
4407                         __func__, err);
4408                 goto out;
4409         }
4410
4411         /* Poll fDeviceInit flag to be cleared */
4412         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4413         do {
4414                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4415                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4416                 if (!flag_res)
4417                         break;
4418                 usleep_range(5000, 10000);
4419         } while (ktime_before(ktime_get(), timeout));
4420
4421         if (err) {
4422                 dev_err(hba->dev,
4423                                 "%s reading fDeviceInit flag failed with error %d\n",
4424                                 __func__, err);
4425         } else if (flag_res) {
4426                 dev_err(hba->dev,
4427                                 "%s fDeviceInit was not cleared by the device\n",
4428                                 __func__);
4429                 err = -EBUSY;
4430         }
4431 out:
4432         return err;
4433 }
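
/*
 * Polling budget sketch for the loop above: with FDEVICEINIT_COMPL_TIMEOUT
 * at 1500 ms and a 5-10 ms sleep per iteration, at most roughly
 *
 *	1500 / 5  = 300 reads (shortest sleep)
 *	1500 / 10 = 150 reads (longest sleep)
 *
 * of fDeviceInit are issued before -EBUSY is returned.
 */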
4434
4435 /**
4436  * ufshcd_make_hba_operational - Make UFS controller operational
4437  * @hba: per adapter instance
4438  *
4439  * To bring the UFS host controller to an operational state,
4440  * 1. Enable required interrupts
4441  * 2. Configure interrupt aggregation
4442  * 3. Program UTRL and UTMRL base address
4443  * 4. Configure run-stop-registers
4444  *
4445  * Returns 0 on success, non-zero value on failure
4446  */
4447 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4448 {
4449         int err = 0;
4450         u32 reg;
4451
4452         /* Enable required interrupts */
4453         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4454
4455         /* Configure interrupt aggregation */
4456         if (ufshcd_is_intr_aggr_allowed(hba))
4457                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4458         else
4459                 ufshcd_disable_intr_aggr(hba);
4460
4461         /* Configure UTRL and UTMRL base address registers */
4462         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4463                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4464         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4465                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4466         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4467                         REG_UTP_TASK_REQ_LIST_BASE_L);
4468         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4469                         REG_UTP_TASK_REQ_LIST_BASE_H);
4470
4471         /*
4472          * Make sure base address and interrupt setup are updated before
4473          * enabling the run/stop registers below.
4474          */
4475         wmb();
4476
4477         /*
4478          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4479          */
4480         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4481         if (!(ufshcd_get_lists_status(reg))) {
4482                 ufshcd_enable_run_stop_reg(hba);
4483         } else {
4484                 dev_err(hba->dev,
4485                         "Host controller not ready to process requests");
4486                 err = -EIO;
4487         }
4488
4489         return err;
4490 }
4491 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4492
4493 /**
4494  * ufshcd_hba_stop - Send controller to reset state
4495  * @hba: per adapter instance
4496  */
4497 void ufshcd_hba_stop(struct ufs_hba *hba)
4498 {
4499         unsigned long flags;
4500         int err;
4501
4502         /*
4503          * Obtain the host lock to prevent the controller from being disabled
4504          * while the UFS interrupt handler is active on another CPU.
4505          */
4506         spin_lock_irqsave(hba->host->host_lock, flags);
4507         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4508         spin_unlock_irqrestore(hba->host->host_lock, flags);
4509
4510         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4511                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4512                                         10, 1);
4513         if (err)
4514                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4515 }
4516 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4517
4518 /**
4519  * ufshcd_hba_execute_hce - initialize the controller
4520  * @hba: per adapter instance
4521  *
4522  * The controller resets itself and the controller firmware initialization
4523  * sequence kicks off. When the controller is ready it sets
4524  * the Host Controller Enable bit to 1.
4525  *
4526  * Returns 0 on success, non-zero value on failure
4527  */
4528 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4529 {
4530         int retry_outer = 3;
4531         int retry_inner;
4532
4533 start:
4534         if (!ufshcd_is_hba_active(hba))
4535                 /* change controller state to "reset state" */
4536                 ufshcd_hba_stop(hba);
4537
4538         /* UniPro link is disabled at this point */
4539         ufshcd_set_link_off(hba);
4540
4541         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4542
4543         /* start controller initialization sequence */
4544         ufshcd_hba_start(hba);
4545
4546         /*
4547          * To initialize a UFS host controller the HCE bit must be set to 1.
4548          * During initialization the HCE bit value changes from 1->0->1.
4549          * When the host controller completes the initialization sequence
4550          * it sets the HCE bit back to 1. The same HCE bit is read back
4551          * to check whether the controller has completed initialization.
4552          * So, without this delay, the HCE = 1 written by the previous
4553          * instruction might be read back before the reset has taken effect.
4554          * This delay can be changed based on the controller.
4555          */
4556         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4557
4558         /* wait for the host controller to complete initialization */
4559         retry_inner = 50;
4560         while (ufshcd_is_hba_active(hba)) {
4561                 if (retry_inner) {
4562                         retry_inner--;
4563                 } else {
4564                         dev_err(hba->dev,
4565                                 "Controller enable failed\n");
4566                         if (retry_outer) {
4567                                 retry_outer--;
4568                                 goto start;
4569                         }
4570                         return -EIO;
4571                 }
4572                 usleep_range(1000, 1100);
4573         }
4574
4575         /* enable UIC related interrupts */
4576         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4577
4578         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4579
4580         return 0;
4581 }
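
/*
 * Worst-case latency sketch for the retry loops above: each attempt sleeps
 * hba_enable_delay_us and then polls up to 50 times at ~1 ms per poll
 * (~50-55 ms), and the sequence restarts up to 3 more times via
 * retry_outer, i.e. roughly
 *
 *	4 * (hba_enable_delay_us + 50 * ~1 ms) >= ~200 ms
 *
 * can elapse before -EIO is returned.
 */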
4582
4583 int ufshcd_hba_enable(struct ufs_hba *hba)
4584 {
4585         int ret;
4586
4587         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4588                 ufshcd_set_link_off(hba);
4589                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4590
4591                 /* enable UIC related interrupts */
4592                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4593                 ret = ufshcd_dme_reset(hba);
4594                 if (!ret) {
4595                         ret = ufshcd_dme_enable(hba);
4596                         if (!ret)
4597                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4598                         if (ret)
4599                                 dev_err(hba->dev,
4600                                         "Host controller enable failed with non-hce\n");
4601                 }
4602         } else {
4603                 ret = ufshcd_hba_execute_hce(hba);
4604         }
4605
4606         return ret;
4607 }
4608 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4609
4610 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4611 {
4612         int tx_lanes = 0, i, err = 0;
4613
4614         if (!peer)
4615                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4616                                &tx_lanes);
4617         else
4618                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4619                                     &tx_lanes);
4620         for (i = 0; i < tx_lanes; i++) {
4621                 if (!peer)
4622                         err = ufshcd_dme_set(hba,
4623                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4624                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4625                                         0);
4626                 else
4627                         err = ufshcd_dme_peer_set(hba,
4628                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4629                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4630                                         0);
4631                 if (err) {
4632                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4633                                 __func__, peer, i, err);
4634                         break;
4635                 }
4636         }
4637
4638         return err;
4639 }
4640
4641 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4642 {
4643         return ufshcd_disable_tx_lcc(hba, true);
4644 }
4645
4646 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4647 {
4648         struct ufs_event_hist *e;
4649
4650         if (id >= UFS_EVT_CNT)
4651                 return;
4652
4653         e = &hba->ufs_stats.event[id];
4654         e->val[e->pos] = val;
4655         e->tstamp[e->pos] = ktime_get();
4656         e->cnt += 1;
4657         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4658
4659         ufshcd_vops_event_notify(hba, id, &val);
4660 }
4661 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
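
/*
 * The event history is a fixed-size ring: e->pos wraps modulo
 * UFS_EVENT_HIST_LENGTH while e->cnt keeps growing. Illustrative sketch,
 * assuming UFS_EVENT_HIST_LENGTH == 8:
 *
 *	// after 10 updates for one id: cnt == 10, pos == 2, and val[0]
 *	// and val[1] hold the 9th and 10th (most recent) values
 */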
4662
4663 /**
4664  * ufshcd_link_startup - Initialize unipro link startup
4665  * @hba: per adapter instance
4666  *
4667  * Returns 0 for success, non-zero in case of failure
4668  */
4669 static int ufshcd_link_startup(struct ufs_hba *hba)
4670 {
4671         int ret;
4672         int retries = DME_LINKSTARTUP_RETRIES;
4673         bool link_startup_again = false;
4674
4675         /*
4676          * If the UFS device isn't active then we have to issue link startup
4677          * twice to make sure the device state moves to active.
4678          */
4679         if (!ufshcd_is_ufs_dev_active(hba))
4680                 link_startup_again = true;
4681
4682 link_startup:
4683         do {
4684                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4685
4686                 ret = ufshcd_dme_link_startup(hba);
4687
4688                 /* check if device is detected by inter-connect layer */
4689                 if (!ret && !ufshcd_is_device_present(hba)) {
4690                         ufshcd_update_evt_hist(hba,
4691                                                UFS_EVT_LINK_STARTUP_FAIL,
4692                                                0);
4693                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4694                         ret = -ENXIO;
4695                         goto out;
4696                 }
4697
4698                 /*
4699                  * DME link lost indication is only received when link is up,
4700                  * but we can't be sure if the link is up until link startup
4701                  * succeeds. So reset the local Uni-Pro and try again.
4702                  */
4703                 if (ret && ufshcd_hba_enable(hba)) {
4704                         ufshcd_update_evt_hist(hba,
4705                                                UFS_EVT_LINK_STARTUP_FAIL,
4706                                                (u32)ret);
4707                         goto out;
4708                 }
4709         } while (ret && retries--);
4710
4711         if (ret) {
4712                 /* failed to get the link up... give up */
4713                 ufshcd_update_evt_hist(hba,
4714                                        UFS_EVT_LINK_STARTUP_FAIL,
4715                                        (u32)ret);
4716                 goto out;
4717         }
4718
4719         if (link_startup_again) {
4720                 link_startup_again = false;
4721                 retries = DME_LINKSTARTUP_RETRIES;
4722                 goto link_startup;
4723         }
4724
4725         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4726         ufshcd_init_pwr_info(hba);
4727         ufshcd_print_pwr_info(hba);
4728
4729         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4730                 ret = ufshcd_disable_device_tx_lcc(hba);
4731                 if (ret)
4732                         goto out;
4733         }
4734
4735         /* Include any host controller configuration via UIC commands */
4736         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4737         if (ret)
4738                 goto out;
4739
4740         /* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
4741         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4742         ret = ufshcd_make_hba_operational(hba);
4743 out:
4744         if (ret) {
4745                 dev_err(hba->dev, "link startup failed %d\n", ret);
4746                 ufshcd_print_host_state(hba);
4747                 ufshcd_print_pwr_info(hba);
4748                 ufshcd_print_evt_hist(hba);
4749         }
4750         return ret;
4751 }
4752
4753 /**
4754  * ufshcd_verify_dev_init() - Verify device initialization
4755  * @hba: per-adapter instance
4756  *
4757  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4758  * device's UFS Transport Protocol (UTP) layer is ready after a reset.
4759  * If the UTP layer at the device side is not initialized, it may
4760  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4761  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4762  */
4763 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4764 {
4765         int err = 0;
4766         int retries;
4767
4768         ufshcd_hold(hba, false);
4769         mutex_lock(&hba->dev_cmd.lock);
4770         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4771                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4772                                           hba->nop_out_timeout);
4773
4774                 if (!err || err == -ETIMEDOUT)
4775                         break;
4776
4777                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4778         }
4779         mutex_unlock(&hba->dev_cmd.lock);
4780         ufshcd_release(hba);
4781
4782         if (err)
4783                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4784         return err;
4785 }
4786
4787 /**
4788  * ufshcd_set_queue_depth - set lun queue depth
4789  * @sdev: pointer to SCSI device
4790  *
4791  * Read the bLUQueueDepth value and activate SCSI tagged command
4792  * queueing. For WLUNs, the queue depth is set to 1. For best-effort
4793  * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4794  * number of requests the host can queue.
4795  */
4796 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4797 {
4798         int ret = 0;
4799         u8 lun_qdepth;
4800         struct ufs_hba *hba;
4801
4802         hba = shost_priv(sdev->host);
4803
4804         lun_qdepth = hba->nutrs;
4805         ret = ufshcd_read_unit_desc_param(hba,
4806                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4807                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4808                                           &lun_qdepth,
4809                                           sizeof(lun_qdepth));
4810
4811         /* Some WLUNs don't support the unit descriptor */
4812         if (ret == -EOPNOTSUPP)
4813                 lun_qdepth = 1;
4814         else if (!lun_qdepth)
4815                 /* eventually, we can figure out the real queue depth */
4816                 lun_qdepth = hba->nutrs;
4817         else
4818                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4819
4820         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4821                         __func__, lun_qdepth);
4822         scsi_change_queue_depth(sdev, lun_qdepth);
4823 }
4824
4825 /*
4826  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4827  * @hba: per-adapter instance
4828  * @lun: UFS device lun id
4829  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4830  *
4831  * Returns 0 in case of success, with the bLUWriteProtect status returned in
4832  * the @b_lu_write_protect parameter.
4833  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4834  * Returns -EINVAL in case of invalid parameters passed to this function.
4835  */
4836 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4837                             u8 lun,
4838                             u8 *b_lu_write_protect)
4839 {
4840         int ret;
4841
4842         if (!b_lu_write_protect)
4843                 ret = -EINVAL;
4844         /*
4845          * According to UFS device spec, RPMB LU can't be write
4846          * protected so skip reading bLUWriteProtect parameter for
4847          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4848          */
4849         else if (lun >= hba->dev_info.max_lu_supported)
4850                 ret = -ENOTSUPP;
4851         else
4852                 ret = ufshcd_read_unit_desc_param(hba,
4853                                           lun,
4854                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4855                                           b_lu_write_protect,
4856                                           sizeof(*b_lu_write_protect));
4857         return ret;
4858 }
4859
4860 /**
4861  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4862  * status
4863  * @hba: per-adapter instance
4864  * @sdev: pointer to SCSI device
4865  *
4866  */
4867 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4868                                                     struct scsi_device *sdev)
4869 {
4870         if (hba->dev_info.f_power_on_wp_en &&
4871             !hba->dev_info.is_lu_power_on_wp) {
4872                 u8 b_lu_write_protect;
4873
4874                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4875                                       &b_lu_write_protect) &&
4876                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4877                         hba->dev_info.is_lu_power_on_wp = true;
4878         }
4879 }
4880
4881 /**
4882  * ufshcd_setup_links - associate link b/w device wlun and other luns
4883  * @sdev: pointer to SCSI device
4884  * @hba: pointer to ufs hba
4885  */
4886 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4887 {
4888         struct device_link *link;
4889
4890         /*
4891          * The device WLUN is the supplier and the rest of the LUNs are
4892          * consumers. This ensures the device WLUN suspends after all other LUNs.
4893          */
4894         if (hba->sdev_ufs_device) {
4895                 link = device_link_add(&sdev->sdev_gendev,
4896                                        &hba->sdev_ufs_device->sdev_gendev,
4897                                        DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4898                 if (!link) {
4899                         dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
4900                                 dev_name(&hba->sdev_ufs_device->sdev_gendev));
4901                         return;
4902                 }
4903                 hba->luns_avail--;
4904                 /* Ignore REPORT_LUN wlun probing */
4905                 if (hba->luns_avail == 1) {
4906                         ufshcd_rpm_put(hba);
4907                         return;
4908                 }
4909         } else {
4910                 /*
4911                  * Device wlun is probed. The assumption is that WLUNs are
4912                  * scanned before other LUNs.
4913                  */
4914                 hba->luns_avail--;
4915         }
4916 }
4917
4918 /**
4919  * ufshcd_slave_alloc - handle initial SCSI device configurations
4920  * @sdev: pointer to SCSI device
4921  *
4922  * Returns zero (success).
4923  */
4924 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4925 {
4926         struct ufs_hba *hba;
4927
4928         hba = shost_priv(sdev->host);
4929
4930         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4931         sdev->use_10_for_ms = 1;
4932
4933         /* DBD field should be set to 1 in mode sense(10) */
4934         sdev->set_dbd_for_ms = 1;
4935
4936         /* allow SCSI layer to restart the device in case of errors */
4937         sdev->allow_restart = 1;
4938
4939         /* REPORT SUPPORTED OPERATION CODES is not supported */
4940         sdev->no_report_opcodes = 1;
4941
4942         /* WRITE_SAME command is not supported */
4943         sdev->no_write_same = 1;
4944
4945         ufshcd_set_queue_depth(sdev);
4946
4947         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4948
4949         ufshcd_setup_links(hba, sdev);
4950
4951         return 0;
4952 }
4953
4954 /**
4955  * ufshcd_change_queue_depth - change queue depth
4956  * @sdev: pointer to SCSI device
4957  * @depth: required depth to set
4958  *
4959  * Change queue depth and make sure the max. limits are not crossed.
4960  */
4961 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4962 {
4963         struct ufs_hba *hba = shost_priv(sdev->host);
4964
4965         if (depth > hba->nutrs)
4966                 depth = hba->nutrs;
4967         return scsi_change_queue_depth(sdev, depth);
4968 }
4969
4970 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
4971 {
4972         /* skip well-known LU */
4973         if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4974             !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4975                 return;
4976
4977         ufshpb_destroy_lu(hba, sdev);
4978 }
4979
4980 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
4981 {
4982         /* skip well-known LU */
4983         if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4984             !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4985                 return;
4986
4987         ufshpb_init_hpb_lu(hba, sdev);
4988 }
4989
4990 /**
4991  * ufshcd_slave_configure - adjust SCSI device configurations
4992  * @sdev: pointer to SCSI device
4993  */
4994 static int ufshcd_slave_configure(struct scsi_device *sdev)
4995 {
4996         struct ufs_hba *hba = shost_priv(sdev->host);
4997         struct request_queue *q = sdev->request_queue;
4998
4999         ufshcd_hpb_configure(hba, sdev);
5000
5001         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5002         if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
5003                 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
5004         /*
5005          * Block runtime-pm until all consumers are added.
5006          * Refer to ufshcd_setup_links().
5007          */
5008         if (is_device_wlun(sdev))
5009                 pm_runtime_get_noresume(&sdev->sdev_gendev);
5010         else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5011                 sdev->rpm_autosuspend = 1;
5012
5013         ufshcd_crypto_register(hba, q);
5014
5015         return 0;
5016 }
5017
5018 /**
5019  * ufshcd_slave_destroy - remove SCSI device configurations
5020  * @sdev: pointer to SCSI device
5021  */
5022 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5023 {
5024         struct ufs_hba *hba;
5025         unsigned long flags;
5026
5027         hba = shost_priv(sdev->host);
5028
5029         ufshcd_hpb_destroy(hba, sdev);
5030
5031         /* Drop the reference as it won't be needed anymore */
5032         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5033                 spin_lock_irqsave(hba->host->host_lock, flags);
5034                 hba->sdev_ufs_device = NULL;
5035                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5036         } else if (hba->sdev_ufs_device) {
5037                 struct device *supplier = NULL;
5038
5039                 /* Ensure UFS Device WLUN exists and does not disappear */
5040                 spin_lock_irqsave(hba->host->host_lock, flags);
5041                 if (hba->sdev_ufs_device) {
5042                         supplier = &hba->sdev_ufs_device->sdev_gendev;
5043                         get_device(supplier);
5044                 }
5045                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5046
5047                 if (supplier) {
5048                         /*
5049                          * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5050                          * device will not have been registered but can still
5051                          * have a device link holding a reference to the device.
5052                          */
5053                         device_link_remove(&sdev->sdev_gendev, supplier);
5054                         put_device(supplier);
5055                 }
5056         }
5057 }
5058
5059 /**
5060  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5061  * @lrbp: pointer to local reference block of completed command
5062  * @scsi_status: SCSI command status
5063  *
5064  * Returns a value based on the SCSI command status
5065  */
5066 static inline int
5067 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5068 {
5069         int result = 0;
5070
5071         switch (scsi_status) {
5072         case SAM_STAT_CHECK_CONDITION:
5073                 ufshcd_copy_sense_data(lrbp);
5074                 fallthrough;
5075         case SAM_STAT_GOOD:
5076                 result |= DID_OK << 16 | scsi_status;
5077                 break;
5078         case SAM_STAT_TASK_SET_FULL:
5079         case SAM_STAT_BUSY:
5080         case SAM_STAT_TASK_ABORTED:
5081                 ufshcd_copy_sense_data(lrbp);
5082                 result |= scsi_status;
5083                 break;
5084         default:
5085                 result |= DID_ERROR << 16;
5086                 break;
5087         } /* end of switch */
5088
5089         return result;
5090 }
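
/*
 * Result word layout sketch for the function above: the SCSI midlayer
 * expects the host byte in bits 23:16 and the SCSI status byte in bits
 * 7:0, which is what the shifts produce. For example:
 *
 *	result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;	/* 0x00000002 */
 */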
5091
5092 /**
5093  * ufshcd_transfer_rsp_status - Get overall status of the response
5094  * @hba: per adapter instance
5095  * @lrbp: pointer to local reference block of completed command
5096  *
5097  * Returns result of the command to notify SCSI midlayer
5098  */
5099 static inline int
5100 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5101 {
5102         int result = 0;
5103         int scsi_status;
5104         enum utp_ocs ocs;
5105
5106         /* overall command status of utrd */
5107         ocs = ufshcd_get_tr_ocs(lrbp);
5108
5109         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5110                 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5111                                         MASK_RSP_UPIU_RESULT)
5112                         ocs = OCS_SUCCESS;
5113         }
5114
5115         switch (ocs) {
5116         case OCS_SUCCESS:
5117                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5118                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5119                 switch (result) {
5120                 case UPIU_TRANSACTION_RESPONSE:
5121                         /*
5122                          * get the response UPIU result to extract
5123                          * the SCSI command status
5124                          */
5125                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5126
5127                         /*
5128                          * get the result based on SCSI status response
5129                          * to notify the SCSI midlayer of the command status
5130                          */
5131                         scsi_status = result & MASK_SCSI_STATUS;
5132                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5133
5134                         /*
5135                          * Currently we only support BKOPS exception
5136                          * events, hence we can ignore BKOPS exception events
5137                          * during power management callbacks. A BKOPS exception
5138                          * event is not expected to be raised in the runtime
5139                          * suspend callback, as that path allows urgent BKOPS.
5140                          * During system suspend we forcefully disable BKOPS
5141                          * anyway, and if urgent BKOPS is needed it will be
5142                          * re-enabled on system resume. A long-term solution
5143                          * could be to abort the system suspend if the
5144                          * UFS device needs urgent BKOPS.
5145                          */
5146                         if (!hba->pm_op_in_progress &&
5147                             !ufshcd_eh_in_progress(hba) &&
5148                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5149                                 /* Flushed in suspend */
5150                                 schedule_work(&hba->eeh_work);
5151
5152                         if (scsi_status == SAM_STAT_GOOD)
5153                                 ufshpb_rsp_upiu(hba, lrbp);
5154                         break;
5155                 case UPIU_TRANSACTION_REJECT_UPIU:
5156                         /* TODO: handle Reject UPIU Response */
5157                         result = DID_ERROR << 16;
5158                         dev_err(hba->dev,
5159                                 "Reject UPIU not fully implemented\n");
5160                         break;
5161                 default:
5162                         dev_err(hba->dev,
5163                                 "Unexpected request response code = %x\n",
5164                                 result);
5165                         result = DID_ERROR << 16;
5166                         break;
5167                 }
5168                 break;
5169         case OCS_ABORTED:
5170                 result |= DID_ABORT << 16;
5171                 break;
5172         case OCS_INVALID_COMMAND_STATUS:
5173                 result |= DID_REQUEUE << 16;
5174                 break;
5175         case OCS_INVALID_CMD_TABLE_ATTR:
5176         case OCS_INVALID_PRDT_ATTR:
5177         case OCS_MISMATCH_DATA_BUF_SIZE:
5178         case OCS_MISMATCH_RESP_UPIU_SIZE:
5179         case OCS_PEER_COMM_FAILURE:
5180         case OCS_FATAL_ERROR:
5181         case OCS_DEVICE_FATAL_ERROR:
5182         case OCS_INVALID_CRYPTO_CONFIG:
5183         case OCS_GENERAL_CRYPTO_ERROR:
5184         default:
5185                 result |= DID_ERROR << 16;
5186                 dev_err(hba->dev,
5187                                 "OCS error from controller = %x for tag %d\n",
5188                                 ocs, lrbp->task_tag);
5189                 ufshcd_print_evt_hist(hba);
5190                 ufshcd_print_host_state(hba);
5191                 break;
5192         } /* end of switch */
5193
5194         if ((host_byte(result) != DID_OK) &&
5195             (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5196                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
5197         return result;
5198 }
5199
5200 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5201                                          u32 intr_mask)
5202 {
5203         if (!ufshcd_is_auto_hibern8_supported(hba) ||
5204             !ufshcd_is_auto_hibern8_enabled(hba))
5205                 return false;
5206
5207         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5208                 return false;
5209
5210         if (hba->active_uic_cmd &&
5211             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5212             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5213                 return false;
5214
5215         return true;
5216 }
5217
5218 /**
5219  * ufshcd_uic_cmd_compl - handle completion of uic command
5220  * @hba: per adapter instance
5221  * @intr_status: interrupt status generated by the controller
5222  *
5223  * Returns
5224  *  IRQ_HANDLED - If interrupt is valid
5225  *  IRQ_NONE    - If invalid interrupt
5226  */
5227 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5228 {
5229         irqreturn_t retval = IRQ_NONE;
5230
5231         spin_lock(hba->host->host_lock);
5232         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5233                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5234
5235         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5236                 hba->active_uic_cmd->argument2 |=
5237                         ufshcd_get_uic_cmd_result(hba);
5238                 hba->active_uic_cmd->argument3 =
5239                         ufshcd_get_dme_attr_val(hba);
5240                 if (!hba->uic_async_done)
5241                         hba->active_uic_cmd->cmd_active = 0;
5242                 complete(&hba->active_uic_cmd->done);
5243                 retval = IRQ_HANDLED;
5244         }
5245
5246         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5247                 hba->active_uic_cmd->cmd_active = 0;
5248                 complete(hba->uic_async_done);
5249                 retval = IRQ_HANDLED;
5250         }
5251
5252         if (retval == IRQ_HANDLED)
5253                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5254                                              UFS_CMD_COMP);
5255         spin_unlock(hba->host->host_lock);
5256         return retval;
5257 }
5258
5259 /**
5260  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5261  * @hba: per adapter instance
5262  * @completed_reqs: bitmask that indicates which requests to complete
5263  */
5264 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5265                                         unsigned long completed_reqs)
5266 {
5267         struct ufshcd_lrb *lrbp;
5268         struct scsi_cmnd *cmd;
5269         int result;
5270         int index;
5271         bool update_scaling = false;
5272
5273         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5274                 lrbp = &hba->lrb[index];
5275                 lrbp->compl_time_stamp = ktime_get();
5276                 cmd = lrbp->cmd;
5277                 if (cmd) {
5278                         if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5279                                 ufshcd_update_monitor(hba, lrbp);
5280                         ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
5281                         result = ufshcd_transfer_rsp_status(hba, lrbp);
5282                         scsi_dma_unmap(cmd);
5283                         cmd->result = result;
5284                         /* Mark completed command as NULL in LRB */
5285                         lrbp->cmd = NULL;
5286                         /* Do not touch lrbp after scsi done */
5287                         scsi_done(cmd);
5288                         ufshcd_release(hba);
5289                         update_scaling = true;
5290                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5291                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5292                         if (hba->dev_cmd.complete) {
5293                                 ufshcd_add_command_trace(hba, index,
5294                                                          UFS_DEV_COMP);
5295                                 complete(hba->dev_cmd.complete);
5296                                 update_scaling = true;
5297                         }
5298                 }
5299                 if (update_scaling)
5300                         ufshcd_clk_scaling_update_busy(hba);
5301         }
5302 }
5303
5304 /**
5305  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5306  * @hba: per adapter instance
5307  *
5308  * Returns
5309  *  IRQ_HANDLED - If interrupt is valid
5310  *  IRQ_NONE    - If invalid interrupt
5311  */
5312 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5313 {
5314         unsigned long completed_reqs, flags;
5315         u32 tr_doorbell;
5316
5317         /* Resetting interrupt aggregation counters first and reading the
5318          * DOOR_BELL afterward allows us to handle all the completed requests.
5319          * To prevent starvation of other interrupts the DB is read once
5320          * after reset. The downside of this solution is the possibility of
5321          * a false interrupt if the device completes another request after
5322          * resetting aggregation and before the DB is read.
5323          */
5324         if (ufshcd_is_intr_aggr_allowed(hba) &&
5325             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5326                 ufshcd_reset_intr_aggr(hba);
5327
5328         if (ufs_fail_completion())
5329                 return IRQ_HANDLED;
5330
5331         spin_lock_irqsave(&hba->outstanding_lock, flags);
5332         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5333         completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5334         WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5335                   "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5336                   hba->outstanding_reqs);
5337         hba->outstanding_reqs &= ~completed_reqs;
5338         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5339
5340         if (completed_reqs) {
5341                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5342                 return IRQ_HANDLED;
5343         } else {
5344                 return IRQ_NONE;
5345         }
5346 }
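
/*
 * Doorbell math sketch for the function above: a set doorbell bit means
 * the tag is still executing, so a cleared bit among our outstanding tags
 * signals completion. For example:
 *
 *	outstanding_reqs = 0b1011	// tags 0, 1 and 3 issued
 *	tr_doorbell      = 0b1000	// only tag 3 still running
 *	completed_reqs   = ~0b1000 & 0b1011 = 0b0011	// tags 0 and 1 done
 */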
5347
5348 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5349 {
5350         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5351                                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5352                                        &ee_ctrl_mask);
5353 }
5354
5355 int ufshcd_write_ee_control(struct ufs_hba *hba)
5356 {
5357         int err;
5358
5359         mutex_lock(&hba->ee_ctrl_mutex);
5360         err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5361         mutex_unlock(&hba->ee_ctrl_mutex);
5362         if (err)
5363                 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5364                         __func__, err);
5365         return err;
5366 }
5367
5368 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
5369                              u16 set, u16 clr)
5370 {
5371         u16 new_mask, ee_ctrl_mask;
5372         int err = 0;
5373
5374         mutex_lock(&hba->ee_ctrl_mutex);
5375         new_mask = (*mask & ~clr) | set;
5376         ee_ctrl_mask = new_mask | *other_mask;
5377         if (ee_ctrl_mask != hba->ee_ctrl_mask)
5378                 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5379         /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5380         if (!err) {
5381                 hba->ee_ctrl_mask = ee_ctrl_mask;
5382                 *mask = new_mask;
5383         }
5384         mutex_unlock(&hba->ee_ctrl_mutex);
5385         return err;
5386 }
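
/*
 * Mask arithmetic sketch for ufshcd_update_ee_control() above: clear bits
 * are removed before set bits are applied, and the device is written only
 * when the combined mask changes. For example:
 *
 *	*mask = 0b0101, set = 0b0010, clr = 0b0001
 *	new_mask = (0b0101 & ~0b0001) | 0b0010 = 0b0110
 */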
5387
5388 /**
5389  * ufshcd_disable_ee - disable exception event
5390  * @hba: per-adapter instance
5391  * @mask: exception event to disable
5392  *
5393  * Disables exception event in the device so that the EVENT_ALERT
5394  * bit is not set.
5395  *
5396  * Returns zero on success, non-zero error value on failure.
5397  */
5398 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5399 {
5400         return ufshcd_update_ee_drv_mask(hba, 0, mask);
5401 }
5402
5403 /**
5404  * ufshcd_enable_ee - enable exception event
5405  * @hba: per-adapter instance
5406  * @mask: exception event to enable
5407  *
5408  * Enable corresponding exception event in the device to allow
5409  * device to alert host in critical scenarios.
5410  *
5411  * Returns zero on success, non-zero error value on failure.
5412  */
5413 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5414 {
5415         return ufshcd_update_ee_drv_mask(hba, mask, 0);
5416 }
5417
5418 /**
5419  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5420  * @hba: per-adapter instance
5421  *
5422  * Allow device to manage background operations on its own. Enabling
5423  * this might lead to inconsistent latencies during normal data transfers
5424  * as the device is allowed to manage its own way of handling background
5425  * operations.
5426  *
5427  * Returns zero on success, non-zero on failure.
5428  */
5429 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5430 {
5431         int err = 0;
5432
5433         if (hba->auto_bkops_enabled)
5434                 goto out;
5435
5436         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5437                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5438         if (err) {
5439                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5440                                 __func__, err);
5441                 goto out;
5442         }
5443
5444         hba->auto_bkops_enabled = true;
5445         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5446
5447         /* No need of URGENT_BKOPS exception from the device */
5448         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5449         if (err)
5450                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5451                                 __func__, err);
5452 out:
5453         return err;
5454 }
5455
5456 /**
5457  * ufshcd_disable_auto_bkops - block device in doing background operations
5458  * @hba: per-adapter instance
5459  *
5460  * Disabling background operations improves command response latency but
5461  * has the drawback that the device may move into a critical state in which
5462  * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5463  * host is idle so that BKOPS are managed effectively without any negative
5464  * impacts.
5465  *
5466  * Returns zero on success, non-zero on failure.
5467  */
5468 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5469 {
5470         int err = 0;
5471
5472         if (!hba->auto_bkops_enabled)
5473                 goto out;
5474
5475         /*
5476          * If host assisted BKOPs is to be enabled, make sure
5477          * urgent bkops exception is allowed.
5478          */
5479         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5480         if (err) {
5481                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5482                                 __func__, err);
5483                 goto out;
5484         }
5485
5486         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5487                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5488         if (err) {
5489                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5490                                 __func__, err);
5491                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5492                 goto out;
5493         }
5494
5495         hba->auto_bkops_enabled = false;
5496         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5497         hba->is_urgent_bkops_lvl_checked = false;
5498 out:
5499         return err;
5500 }
5501
5502 /**
5503  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5504  * @hba: per adapter instance
5505  *
5506  * After a device reset the device may reset the BKOPS_EN flag
5507  * to its default value, so the s/w tracking variables must be updated
5508  * as well. This function changes the auto-bkops state based on
5509  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5510  */
5511 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5512 {
5513         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5514                 hba->auto_bkops_enabled = false;
5515                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5516                 ufshcd_enable_auto_bkops(hba);
5517         } else {
5518                 hba->auto_bkops_enabled = true;
5519                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5520                 ufshcd_disable_auto_bkops(hba);
5521         }
5522         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5523         hba->is_urgent_bkops_lvl_checked = false;
5524 }
5525
5526 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5527 {
5528         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5529                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5530 }
5531
5532 /**
5533  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5534  * @hba: per-adapter instance
5535  * @status: bkops_status value
5536  *
5537  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5538  * flag in the device to permit background operations if the device's
5539  * bkops_status is greater than or equal to the "status" argument passed to
5540  * this function; disable it otherwise.
5541  *
5542  * Returns 0 for success, non-zero in case of failure.
5543  *
5544  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5545  * to know whether auto bkops is enabled or disabled after this function
5546  * returns control to it.
5547  */
5548 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5549                              enum bkops_status status)
5550 {
5551         int err;
5552         u32 curr_status = 0;
5553
5554         err = ufshcd_get_bkops_status(hba, &curr_status);
5555         if (err) {
5556                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5557                                 __func__, err);
5558                 goto out;
5559         } else if (curr_status > BKOPS_STATUS_MAX) {
5560                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5561                                 __func__, curr_status);
5562                 err = -EINVAL;
5563                 goto out;
5564         }
5565
5566         if (curr_status >= status)
5567                 err = ufshcd_enable_auto_bkops(hba);
5568         else
5569                 err = ufshcd_disable_auto_bkops(hba);
5570 out:
5571         return err;
5572 }
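
/*
 * Threshold sketch, assuming the standard bBackgroundOpStatus encoding
 * (0 = not required, 1 = non-critical, 2 = performance impact,
 * 3 = critical): ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT) enables
 * auto-bkops when the device reports status 2 or 3 and disables it for
 * status 0 or 1.
 */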
5573
5574 /**
5575  * ufshcd_urgent_bkops - handle urgent bkops exception event
5576  * @hba: per-adapter instance
5577  *
5578  * Enable fBackgroundOpsEn flag in the device to permit background
5579  * operations.
5580  *
5581  * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is not
5582  * enabled, and a negative error value for any other failure.
5583  */
5584 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5585 {
5586         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5587 }
5588
5589 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5590 {
5591         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5592                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5593 }
5594
5595 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5596 {
5597         int err;
5598         u32 curr_status = 0;
5599
5600         if (hba->is_urgent_bkops_lvl_checked)
5601                 goto enable_auto_bkops;
5602
5603         err = ufshcd_get_bkops_status(hba, &curr_status);
5604         if (err) {
5605                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5606                                 __func__, err);
5607                 goto out;
5608         }
5609
5610         /*
5611          * We are seeing that some devices raise the urgent bkops
5612          * exception event even when the BKOPS status doesn't indicate
5613          * performance impacted or critical. Handle these devices by
5614          * determining their urgent bkops status at runtime.
5615          */
5616         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5617                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5618                                 __func__, curr_status);
5619                 /* update the current status as the urgent bkops level */
5620                 hba->urgent_bkops_lvl = curr_status;
5621                 hba->is_urgent_bkops_lvl_checked = true;
5622         }
5623
5624 enable_auto_bkops:
5625         err = ufshcd_enable_auto_bkops(hba);
5626 out:
5627         if (err < 0)
5628                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5629                                 __func__, err);
5630 }
5631
5632 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5633 {
5634         u32 value;
5635
5636         if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5637                                 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5638                 return;
5639
5640         dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5641
5642         ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5643
5644         /*
5645          * A placeholder for the platform vendors to add whatever additional
5646          * steps are required.
5647          */
5648 }
5649
5650 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5651 {
5652         u8 index;
5653         enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5654                                    UPIU_QUERY_OPCODE_CLEAR_FLAG;
5655
5656         index = ufshcd_wb_get_query_index(hba);
5657         return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5658 }
5659
5660 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5661 {
5662         int ret;
5663
5664         if (!ufshcd_is_wb_allowed(hba))
5665                 return 0;
5666
5667         if (!(enable ^ hba->dev_info.wb_enabled))
5668                 return 0;
5669
5670         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5671         if (ret) {
5672                 dev_err(hba->dev, "%s Write Booster %s failed %d\n",
5673                         __func__, enable ? "enable" : "disable", ret);
5674                 return ret;
5675         }
5676
5677         hba->dev_info.wb_enabled = enable;
5678         dev_info(hba->dev, "%s Write Booster %s\n",
5679                         __func__, enable ? "enabled" : "disabled");
5680
5681         return ret;
5682 }
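
/*
 * Illustrative (hypothetical) caller, e.g. from a sysfs store handler;
 * ufshcd_wb_toggle() is a no-op when WriteBooster is not supported or the
 * requested state already matches:
 *
 *	static int example_set_wb(struct ufs_hba *hba, bool on)
 *	{
 *		return ufshcd_wb_toggle(hba, on);	// toggles fWriteBoosterEn
 *	}
 */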
5683
5684 static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5685 {
5686         int ret;
5687
5688         ret = __ufshcd_wb_toggle(hba, set,
5689                         QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5690         if (ret) {
5691                 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
5692                         __func__, set ? "enable" : "disable", ret);
5693                 return;
5694         }
5695         dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
5696                         __func__, set ? "enabled" : "disabled");
5697 }
5698
5699 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5700 {
5701         int ret;
5702
5703         if (!ufshcd_is_wb_allowed(hba) ||
5704             hba->dev_info.wb_buf_flush_enabled == enable)
5705                 return;
5706
5707         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
5708         if (ret) {
5709                 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
5710                         enable ? "enable" : "disable", ret);
5711                 return;
5712         }
5713
5714         hba->dev_info.wb_buf_flush_enabled = enable;
5715
5716         dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
5717                         __func__, enable ? "enabled" : "disabled");
5718 }
5719
5720 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5721                                                 u32 avail_buf)
5722 {
5723         u32 cur_buf;
5724         int ret;
5725         u8 index;
5726
5727         index = ufshcd_wb_get_query_index(hba);
5728         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5729                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5730                                               index, 0, &cur_buf);
5731         if (ret) {
5732                 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5733                         __func__, ret);
5734                 return false;
5735         }
5736
5737         if (!cur_buf) {
5738                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5739                          cur_buf);
5740                 return false;
5741         }
5742         /* Keep VCC on and let flushing continue while the available buffer is below the threshold */
5743         if (avail_buf < hba->vps->wb_flush_threshold)
5744                 return true;
5745
5746         return false;
5747 }
5748
5749 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5750 {
5751         int ret;
5752         u32 avail_buf;
5753         u8 index;
5754
5755         if (!ufshcd_is_wb_allowed(hba))
5756                 return false;
5757         /*
5758          * The UFS device needs VCC to be ON to flush.
5759          * In user-space reduction mode, checking the available buffer
5760          * alone is enough to decide whether to flush; the threshold
5761          * used here corresponds to the buffer being more than 90% full.
5762          * In user-space preservation mode, the current buffer must be
5763          * checked too, because the WB buffer size can shrink as the
5764          * disk fills up. This info is provided by the current buffer
5765          * (dCurrentWriteBoosterBufferSize). There is no point in
5766          * keeping VCC on when the current buffer is empty.
5767          */
5768         index = ufshcd_wb_get_query_index(hba);
5769         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5770                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5771                                       index, 0, &avail_buf);
5772         if (ret) {
5773                 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5774                          __func__, ret);
5775                 return false;
5776         }
5777
5778         if (!hba->dev_info.b_presrv_uspc_en) {
5779                 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5780                         return true;
5781                 return false;
5782         }
5783
5784         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5785 }
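/*
 * Worked example (illustrative, not from any specific device): with
 * user-space preservation disabled, UFS_WB_BUF_REMAIN_PERCENT(10) marks the
 * point where 10% or less of the WriteBooster buffer remains, i.e. the
 * buffer is more than 90% full, so ufshcd_wb_need_flush() returns true and
 * a flush is requested before the next runtime suspend.
 */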
5786
5787 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5788 {
5789         struct ufs_hba *hba = container_of(to_delayed_work(work),
5790                                            struct ufs_hba,
5791                                            rpm_dev_flush_recheck_work);
5792         /*
5793          * To prevent unnecessary VCC power drain after device finishes
5794          * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5795          * after a certain delay so that the threshold is rechecked by the
5796          * next runtime suspend.
5797          */
5798         ufshcd_rpm_get_sync(hba);
5799         ufshcd_rpm_put_sync(hba);
5800 }
5801
5802 /**
5803  * ufshcd_exception_event_handler - handle exceptions raised by device
5804  * @work: pointer to work data
5805  *
5806  * Read bExceptionEventStatus attribute from the device and handle the
5807  * exception event accordingly.
5808  */
5809 static void ufshcd_exception_event_handler(struct work_struct *work)
5810 {
5811         struct ufs_hba *hba = container_of(work, struct ufs_hba, eeh_work);
5812         int err;
5813         u32 status = 0;
5815
5816         ufshcd_scsi_block_requests(hba);
5817         err = ufshcd_get_ee_status(hba, &status);
5818         if (err) {
5819                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5820                                 __func__, err);
5821                 goto out;
5822         }
5823
5824         trace_ufshcd_exception_event(dev_name(hba->dev), status);
5825
5826         if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
5827                 ufshcd_bkops_exception_event_handler(hba);
5828
5829         if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
5830                 ufshcd_temp_exception_event_handler(hba, status);
5831
5832         ufs_debugfs_exception_event(hba, status);
5833 out:
5834         ufshcd_scsi_unblock_requests(hba);
5835 }
5836
5837 /* Complete requests that have door-bell cleared */
5838 static void ufshcd_complete_requests(struct ufs_hba *hba)
5839 {
5840         ufshcd_transfer_req_compl(hba);
5841         ufshcd_tmc_handler(hba);
5842 }
5843
5844 /**
5845  * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5846  *                              to recover from DL NAC errors.
5847  * @hba: per-adapter instance
5848  *
5849  * Returns true if error handling is required, false otherwise.
5850  */
5851 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5852 {
5853         unsigned long flags;
5854         bool err_handling = true;
5855
5856         spin_lock_irqsave(hba->host->host_lock, flags);
5857         /*
5858          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
5859          * device fatal errors and/or DL NAC & REPLAY timeout errors.
5860          */
5861         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5862                 goto out;
5863
5864         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5865             ((hba->saved_err & UIC_ERROR) &&
5866              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5867                 goto out;
5868
5869         if ((hba->saved_err & UIC_ERROR) &&
5870             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5871                 int err;
5872                 /*
5873                  * Wait 50 ms to see whether any other errors show up.
5874                  */
5875                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5876                 msleep(50);
5877                 spin_lock_irqsave(hba->host->host_lock, flags);
5878
5879                 /*
5880                  * Now check whether we have received any severe errors
5881                  * other than the DL NAC error.
5882                  */
5883                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5884                     ((hba->saved_err & UIC_ERROR) &&
5885                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5886                         goto out;
5887
5888                 /*
5889                  * As DL NAC is the only error received so far, send out NOP
5890                  * command to confirm if link is still active or not.
5891                  *   - If we don't get any response then do error recovery.
5892                  *   - If we get response then clear the DL NAC error bit.
5893                  */
5894
5895                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5896                 err = ufshcd_verify_dev_init(hba);
5897                 spin_lock_irqsave(hba->host->host_lock, flags);
5898
5899                 if (err)
5900                         goto out;
5901
5902                 /* Link seems to be alive hence ignore the DL NAC errors */
5903                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5904                         hba->saved_err &= ~UIC_ERROR;
5905                 /* clear NAC error */
5906                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5907                 if (!hba->saved_uic_err)
5908                         err_handling = false;
5909         }
5910 out:
5911         spin_unlock_irqrestore(hba->host->host_lock, flags);
5912         return err_handling;
5913 }
5914
5915 /* host lock must be held before calling this func */
5916 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5917 {
5918         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5919                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5920 }
5921
5922 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5923 {
5924         lockdep_assert_held(hba->host->host_lock);
5925
5926         /* handle fatal errors only when link is not in error state */
5927         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5928                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5929                     ufshcd_is_saved_err_fatal(hba))
5930                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5931                 else
5932                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5933                 queue_work(hba->eh_wq, &hba->eh_work);
5934         }
5935 }
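/*
 * Typical caller pattern (sketch, mirroring ufshcd_abort() below): take the
 * host lock, record the error state, then schedule the handler:
 *
 *	spin_lock_irqsave(hba->host->host_lock, flags);
 *	hba->force_reset = true;
 *	ufshcd_schedule_eh_work(hba);
 *	spin_unlock_irqrestore(hba->host->host_lock, flags);
 */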
5936
5937 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
5938 {
5939         down_write(&hba->clk_scaling_lock);
5940         hba->clk_scaling.is_allowed = allow;
5941         up_write(&hba->clk_scaling_lock);
5942 }
5943
5944 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
5945 {
5946         if (suspend) {
5947                 if (hba->clk_scaling.is_enabled)
5948                         ufshcd_suspend_clkscaling(hba);
5949                 ufshcd_clk_scaling_allow(hba, false);
5950         } else {
5951                 ufshcd_clk_scaling_allow(hba, true);
5952                 if (hba->clk_scaling.is_enabled)
5953                         ufshcd_resume_clkscaling(hba);
5954         }
5955 }
5956
5957 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5958 {
5959         ufshcd_rpm_get_sync(hba);
5960         if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
5961             hba->is_sys_suspended) {
5962                 enum ufs_pm_op pm_op;
5963
5964                 /*
5965                  * Don't assume anything about the resume path: if
5966                  * resume fails, irq and clocks can be OFF, and the power
5967                  * supplies can be OFF or in LPM.
5968                  */
5969                 ufshcd_setup_hba_vreg(hba, true);
5970                 ufshcd_enable_irq(hba);
5971                 ufshcd_setup_vreg(hba, true);
5972                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5973                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5974                 ufshcd_hold(hba, false);
5975                 if (!ufshcd_is_clkgating_allowed(hba))
5976                         ufshcd_setup_clocks(hba, true);
5977                 ufshcd_release(hba);
5978                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5979                 ufshcd_vops_resume(hba, pm_op);
5980         } else {
5981                 ufshcd_hold(hba, false);
5982                 if (ufshcd_is_clkscaling_supported(hba) &&
5983                     hba->clk_scaling.is_enabled)
5984                         ufshcd_suspend_clkscaling(hba);
5985                 ufshcd_clk_scaling_allow(hba, false);
5986         }
5987         ufshcd_scsi_block_requests(hba);
5988         /* Drain ufshcd_queuecommand() */
5989         down_write(&hba->clk_scaling_lock);
5990         up_write(&hba->clk_scaling_lock);
5991         cancel_work_sync(&hba->eeh_work);
5992 }
5993
5994 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5995 {
5996         ufshcd_scsi_unblock_requests(hba);
5997         ufshcd_release(hba);
5998         if (ufshcd_is_clkscaling_supported(hba))
5999                 ufshcd_clk_scaling_suspend(hba, false);
6000         ufshcd_rpm_put(hba);
6001 }
6002
6003 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6004 {
6005         return (!hba->is_powered || hba->shutting_down ||
6006                 !hba->sdev_ufs_device ||
6007                 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6008                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6009                    ufshcd_is_link_broken(hba))));
6010 }
6011
6012 #ifdef CONFIG_PM
6013 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6014 {
6015         struct Scsi_Host *shost = hba->host;
6016         struct scsi_device *sdev;
6017         struct request_queue *q;
6018         int ret;
6019
6020         hba->is_sys_suspended = false;
6021         /*
6022          * Set the RPM status of the wlun device to RPM_ACTIVE;
6023          * this also clears its runtime error.
6024          */
6025         ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
6026
6027         /* hba device might have a runtime error otherwise */
6028         if (ret)
6029                 ret = pm_runtime_set_active(hba->dev);
6030         /*
6031          * If the wlun device had a runtime error, we also need to resume
6032          * those consumer scsi devices in case any of them failed to be
6033          * resumed due to a supplier runtime resume failure. This is to
6034          * unblock blk_queue_enter in case there are bios waiting inside it.
6035          */
6036         if (!ret) {
6037                 shost_for_each_device(sdev, shost) {
6038                         q = sdev->request_queue;
6039                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6040                                        q->rpm_status == RPM_SUSPENDING))
6041                                 pm_request_resume(q->dev);
6042                 }
6043         }
6044 }
6045 #else
6046 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6047 {
6048 }
6049 #endif
6050
6051 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6052 {
6053         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6054         u32 mode;
6055
6056         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6057
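        /*
         * PA_PWRMODE packs the TX power mode in the low bits and the RX
         * power mode at PWRMODE_RX_OFFSET; compare both against the locally
         * cached pwr_info to decide whether a restore is needed.
         */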
6058         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6059                 return true;
6060
6061         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6062                 return true;
6063
6064         return false;
6065 }
6066
6067 /**
6068  * ufshcd_err_handler - handle UFS errors that require s/w attention
6069  * @work: pointer to work structure
6070  */
6071 static void ufshcd_err_handler(struct work_struct *work)
6072 {
6073         int retries = MAX_ERR_HANDLER_RETRIES;
6074         struct ufs_hba *hba;
6075         unsigned long flags;
6076         bool needs_restore;
6077         bool needs_reset;
6078         bool err_xfer;
6079         bool err_tm;
6080         int pmc_err;
6081         int tag;
6082
6083         hba = container_of(work, struct ufs_hba, eh_work);
6084
6085         dev_info(hba->dev,
6086                  "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6087                  __func__, ufshcd_state_name[hba->ufshcd_state],
6088                  hba->is_powered, hba->shutting_down, hba->saved_err,
6089                  hba->saved_uic_err, hba->force_reset,
6090                  ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6091
6092         down(&hba->host_sem);
6093         spin_lock_irqsave(hba->host->host_lock, flags);
6094         if (ufshcd_err_handling_should_stop(hba)) {
6095                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6096                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6097                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6098                 up(&hba->host_sem);
6099                 return;
6100         }
6101         ufshcd_set_eh_in_progress(hba);
6102         spin_unlock_irqrestore(hba->host->host_lock, flags);
6103         ufshcd_err_handling_prepare(hba);
6104         /* Complete requests that have door-bell cleared by h/w */
6105         ufshcd_complete_requests(hba);
6106         spin_lock_irqsave(hba->host->host_lock, flags);
6107 again:
6108         needs_restore = false;
6109         needs_reset = false;
6110         err_xfer = false;
6111         err_tm = false;
6112
6113         if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6114                 hba->ufshcd_state = UFSHCD_STATE_RESET;
6115         /*
6116          * A full reset and restore might have happened after preparation
6117          * is finished, double check whether we should stop.
6118          */
6119         if (ufshcd_err_handling_should_stop(hba))
6120                 goto skip_err_handling;
6121
6122         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6123                 bool ret;
6124
6125                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6126                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6127                 ret = ufshcd_quirk_dl_nac_errors(hba);
6128                 spin_lock_irqsave(hba->host->host_lock, flags);
6129                 if (!ret && ufshcd_err_handling_should_stop(hba))
6130                         goto skip_err_handling;
6131         }
6132
6133         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6134             (hba->saved_uic_err &&
6135              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6136                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6137
6138                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6139                 ufshcd_print_host_state(hba);
6140                 ufshcd_print_pwr_info(hba);
6141                 ufshcd_print_evt_hist(hba);
6142                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6143                 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6144                 spin_lock_irqsave(hba->host->host_lock, flags);
6145         }
6146
6147         /*
6148          * If a host reset is required then skip forcefully clearing the
6149          * pending transfers, because they will be cleared during the
6150          * host reset and restore.
6151          */
6152         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6153             ufshcd_is_saved_err_fatal(hba) ||
6154             ((hba->saved_err & UIC_ERROR) &&
6155              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6156                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6157                 needs_reset = true;
6158                 goto do_reset;
6159         }
6160
6161         /*
6162          * If LINERESET was caught, UFS might have been put to PWM mode,
6163          * check if power mode restore is needed.
6164          */
6165         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6166                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6167                 if (!hba->saved_uic_err)
6168                         hba->saved_err &= ~UIC_ERROR;
6169                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6170                 if (ufshcd_is_pwr_mode_restore_needed(hba))
6171                         needs_restore = true;
6172                 spin_lock_irqsave(hba->host->host_lock, flags);
6173                 if (!hba->saved_err && !needs_restore)
6174                         goto skip_err_handling;
6175         }
6176
6177         hba->silence_err_logs = true;
6178         /* release lock as clear command might sleep */
6179         spin_unlock_irqrestore(hba->host->host_lock, flags);
6180         /* Clear pending transfer requests */
6181         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6182                 if (ufshcd_try_to_abort_task(hba, tag)) {
6183                         err_xfer = true;
6184                         goto lock_skip_pending_xfer_clear;
6185                 }
6186                 dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
6187                         hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
6188         }
6189
6190         /* Clear pending task management requests */
6191         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6192                 if (ufshcd_clear_tm_cmd(hba, tag)) {
6193                         err_tm = true;
6194                         goto lock_skip_pending_xfer_clear;
6195                 }
6196         }
6197
6198 lock_skip_pending_xfer_clear:
6199         /* Complete the requests that are cleared by s/w */
6200         ufshcd_complete_requests(hba);
6201
6202         spin_lock_irqsave(hba->host->host_lock, flags);
6203         hba->silence_err_logs = false;
6204         if (err_xfer || err_tm) {
6205                 needs_reset = true;
6206                 goto do_reset;
6207         }
6208
6209         /*
6210          * After all reqs and tasks are cleared from the doorbell,
6211          * it is now safe to restore the power mode.
6212          */
6213         if (needs_restore) {
6214                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6215                 /*
6216                  * Hold the scaling lock just in case dev cmds
6217                  * are sent via bsg and/or sysfs.
6218                  */
6219                 down_write(&hba->clk_scaling_lock);
6220                 hba->force_pmc = true;
6221                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6222                 if (pmc_err) {
6223                         needs_reset = true;
6224                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6225                                         __func__, pmc_err);
6226                 }
6227                 hba->force_pmc = false;
6228                 ufshcd_print_pwr_info(hba);
6229                 up_write(&hba->clk_scaling_lock);
6230                 spin_lock_irqsave(hba->host->host_lock, flags);
6231         }
6232
6233 do_reset:
6234         /* Fatal errors need reset */
6235         if (needs_reset) {
6236                 int err;
6237
6238                 hba->force_reset = false;
6239                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6240                 err = ufshcd_reset_and_restore(hba);
6241                 if (err)
6242                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6243                                         __func__, err);
6244                 else
6245                         ufshcd_recover_pm_error(hba);
6246                 spin_lock_irqsave(hba->host->host_lock, flags);
6247         }
6248
6249 skip_err_handling:
6250         if (!needs_reset) {
6251                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6252                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6253                 if (hba->saved_err || hba->saved_uic_err)
6254                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x\n",
6255                             __func__, hba->saved_err, hba->saved_uic_err);
6256         }
6257         /* Exit in an operational state or dead */
6258         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6259             hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6260                 if (--retries)
6261                         goto again;
6262                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6263         }
6264         ufshcd_clear_eh_in_progress(hba);
6265         spin_unlock_irqrestore(hba->host->host_lock, flags);
6266         ufshcd_err_handling_unprepare(hba);
6267         up(&hba->host_sem);
6268
6269         dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6270                  ufshcd_state_name[hba->ufshcd_state]);
6271 }
6272
6273 /**
6274  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6275  * @hba: per-adapter instance
6276  *
6277  * Returns
6278  *  IRQ_HANDLED - If interrupt is valid
6279  *  IRQ_NONE    - If invalid interrupt
6280  */
6281 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6282 {
6283         u32 reg;
6284         irqreturn_t retval = IRQ_NONE;
6285
6286         /* PHY layer error */
6287         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6288         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6289             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6290                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6291                 /*
6292                  * To know whether this error is fatal or not, the DB timeout
6293                  * must be checked; but that error is handled separately.
6294                  */
6295                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6296                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6297                                         __func__);
6298
6299                 /* Got a LINERESET indication. */
6300                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6301                         struct uic_command *cmd = NULL;
6302
6303                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6304                         if (hba->uic_async_done && hba->active_uic_cmd)
6305                                 cmd = hba->active_uic_cmd;
6306                         /*
6307                          * Ignore the LINERESET during power mode change
6308                          * operation via DME_SET command.
6309                          */
6310                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
6311                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6312                 }
6313                 retval |= IRQ_HANDLED;
6314         }
6315
6316         /* PA_INIT_ERROR is fatal and needs UIC reset */
6317         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6318         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6319             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6320                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6321
6322                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6323                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6324                 else if (hba->dev_quirks &
6325                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6326                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6327                                 hba->uic_error |=
6328                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6329                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6330                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6331                 }
6332                 retval |= IRQ_HANDLED;
6333         }
6334
6335         /* UIC NL/TL/DME errors need a software retry */
6336         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6337         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6338             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6339                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6340                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6341                 retval |= IRQ_HANDLED;
6342         }
6343
6344         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6345         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6346             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6347                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6348                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6349                 retval |= IRQ_HANDLED;
6350         }
6351
6352         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6353         if ((reg & UIC_DME_ERROR) &&
6354             (reg & UIC_DME_ERROR_CODE_MASK)) {
6355                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6356                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6357                 retval |= IRQ_HANDLED;
6358         }
6359
6360         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6361                         __func__, hba->uic_error);
6362         return retval;
6363 }
6364
6365 /**
6366  * ufshcd_check_errors - Check for errors that need s/w attention
6367  * @hba: per-adapter instance
6368  * @intr_status: interrupt status generated by the controller
6369  *
6370  * Returns
6371  *  IRQ_HANDLED - If interrupt is valid
6372  *  IRQ_NONE    - If invalid interrupt
6373  */
6374 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6375 {
6376         bool queue_eh_work = false;
6377         irqreturn_t retval = IRQ_NONE;
6378
6379         spin_lock(hba->host->host_lock);
6380         hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6381
6382         if (hba->errors & INT_FATAL_ERRORS) {
6383                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6384                                        hba->errors);
6385                 queue_eh_work = true;
6386         }
6387
6388         if (hba->errors & UIC_ERROR) {
6389                 hba->uic_error = 0;
6390                 retval = ufshcd_update_uic_error(hba);
6391                 if (hba->uic_error)
6392                         queue_eh_work = true;
6393         }
6394
6395         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6396                 dev_err(hba->dev,
6397                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6398                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6399                         "Enter" : "Exit",
6400                         hba->errors, ufshcd_get_upmcrs(hba));
6401                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6402                                        hba->errors);
6403                 ufshcd_set_link_broken(hba);
6404                 queue_eh_work = true;
6405         }
6406
6407         if (queue_eh_work) {
6408                 /*
6409                  * Update the transfer error masks as sticky bits; do this
6410                  * irrespective of the current ufshcd_state.
6411                  */
6412                 hba->saved_err |= hba->errors;
6413                 hba->saved_uic_err |= hba->uic_error;
6414
6415                 /* dump controller state before resetting */
6416                 if ((hba->saved_err &
6417                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6418                     (hba->saved_uic_err &&
6419                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6420                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6421                                         __func__, hba->saved_err,
6422                                         hba->saved_uic_err);
6423                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6424                                          "host_regs: ");
6425                         ufshcd_print_pwr_info(hba);
6426                 }
6427                 ufshcd_schedule_eh_work(hba);
6428                 retval |= IRQ_HANDLED;
6429         }
6430         /*
6431          * if (!queue_eh_work) -
6432          * Other errors are either non-fatal ones, from which the host
6433          * recovers by itself without s/w intervention, or errors that
6434          * will be handled by the SCSI core layer.
6435          */
6436         hba->errors = 0;
6437         hba->uic_error = 0;
6438         spin_unlock(hba->host->host_lock);
6439         return retval;
6440 }
6441
6442 /**
6443  * ufshcd_tmc_handler - handle task management function completion
6444  * @hba: per adapter instance
6445  *
6446  * Returns
6447  *  IRQ_HANDLED - If interrupt is valid
6448  *  IRQ_NONE    - If invalid interrupt
6449  */
6450 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6451 {
6452         unsigned long flags, pending, issued;
6453         irqreturn_t ret = IRQ_NONE;
6454         int tag;
6455
6456         spin_lock_irqsave(hba->host->host_lock, flags);
6457         pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
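        /*
         * Tasks we issued whose doorbell bit has since cleared have
         * completed; everything still pending is left untouched.
         */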
6458         issued = hba->outstanding_tasks & ~pending;
6459         for_each_set_bit(tag, &issued, hba->nutmrs) {
6460                 struct request *req = hba->tmf_rqs[tag];
6461                 struct completion *c = req->end_io_data;
6462
6463                 complete(c);
6464                 ret = IRQ_HANDLED;
6465         }
6466         spin_unlock_irqrestore(hba->host->host_lock, flags);
6467
6468         return ret;
6469 }
6470
6471 /**
6472  * ufshcd_sl_intr - Interrupt service routine
6473  * @hba: per adapter instance
6474  * @intr_status: contains interrupts generated by the controller
6475  *
6476  * Returns
6477  *  IRQ_HANDLED - If interrupt is valid
6478  *  IRQ_NONE    - If invalid interrupt
6479  */
6480 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6481 {
6482         irqreturn_t retval = IRQ_NONE;
6483
6484         if (intr_status & UFSHCD_UIC_MASK)
6485                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6486
6487         if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6488                 retval |= ufshcd_check_errors(hba, intr_status);
6489
6490         if (intr_status & UTP_TASK_REQ_COMPL)
6491                 retval |= ufshcd_tmc_handler(hba);
6492
6493         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6494                 retval |= ufshcd_transfer_req_compl(hba);
6495
6496         return retval;
6497 }
6498
6499 /**
6500  * ufshcd_intr - Main interrupt service routine
6501  * @irq: irq number
6502  * @__hba: pointer to adapter instance
6503  *
6504  * Returns
6505  *  IRQ_HANDLED - If interrupt is valid
6506  *  IRQ_NONE    - If invalid interrupt
6507  */
6508 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6509 {
6510         u32 intr_status, enabled_intr_status = 0;
6511         irqreturn_t retval = IRQ_NONE;
6512         struct ufs_hba *hba = __hba;
6513         int retries = hba->nutrs;
6514
6515         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6516         hba->ufs_stats.last_intr_status = intr_status;
6517         hba->ufs_stats.last_intr_ts = ktime_get();
6518
6519         /*
6520          * There can be at most hba->nutrs reqs in flight, and in the worst
6521          * case the reqs finish one by one after the interrupt status is
6522          * read. Make sure we handle all of them by re-checking the interrupt
6523          * status in a loop until every req is processed before returning.
6524          */
6525         while (intr_status && retries--) {
6526                 enabled_intr_status =
6527                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6528                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6529                 if (enabled_intr_status)
6530                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6531
6532                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6533         }
6534
6535         if (enabled_intr_status && retval == IRQ_NONE &&
6536             (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6537              hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6538                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6539                                         __func__,
6540                                         intr_status,
6541                                         hba->ufs_stats.last_intr_status,
6542                                         enabled_intr_status);
6543                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6544         }
6545
6546         return retval;
6547 }
6548
6549 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6550 {
6551         int err = 0;
6552         u32 mask = 1 << tag;
6553         unsigned long flags;
6554
6555         if (!test_bit(tag, &hba->outstanding_tasks))
6556                 goto out;
6557
6558         spin_lock_irqsave(hba->host->host_lock, flags);
6559         ufshcd_utmrl_clear(hba, tag);
6560         spin_unlock_irqrestore(hba->host->host_lock, flags);
6561
6562         /* poll for at most 1 sec for the doorbell register to be cleared by h/w */
6563         err = ufshcd_wait_for_register(hba,
6564                         REG_UTP_TASK_REQ_DOOR_BELL,
6565                         mask, 0, 1000, 1000);
6566
6567         dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6568                 tag, err ? "failed" : "succeeded");
6569
6570 out:
6571         return err;
6572 }
6573
6574 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6575                 struct utp_task_req_desc *treq, u8 tm_function)
6576 {
6577         struct request_queue *q = hba->tmf_queue;
6578         struct Scsi_Host *host = hba->host;
6579         DECLARE_COMPLETION_ONSTACK(wait);
6580         struct request *req;
6581         unsigned long flags;
6582         int task_tag, err;
6583
6584         /*
6585          * blk_mq_alloc_request() is used here only to get a free tag.
6586          */
6587         req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
6588         if (IS_ERR(req))
6589                 return PTR_ERR(req);
6590
6591         req->end_io_data = &wait;
6592         ufshcd_hold(hba, false);
6593
6594         spin_lock_irqsave(host->host_lock, flags);
6595
6596         task_tag = req->tag;
6597         hba->tmf_rqs[req->tag] = req;
6598         treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
6599
6600         memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6601         ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6602
6603         /* send command to the controller */
6604         __set_bit(task_tag, &hba->outstanding_tasks);
6605
6606         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6607         /* Make sure that doorbell is committed immediately */
6608         wmb();
6609
6610         spin_unlock_irqrestore(host->host_lock, flags);
6611
6612         ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6613
6614         /* wait until the task management command is completed */
6615         err = wait_for_completion_io_timeout(&wait,
6616                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6617         if (!err) {
6618                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
6619                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6620                                 __func__, tm_function);
6621                 if (ufshcd_clear_tm_cmd(hba, task_tag))
6622                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6623                                         __func__, task_tag);
6624                 err = -ETIMEDOUT;
6625         } else {
6626                 err = 0;
6627                 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6628
6629                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
6630         }
6631
6632         spin_lock_irqsave(hba->host->host_lock, flags);
6633         hba->tmf_rqs[req->tag] = NULL;
6634         __clear_bit(task_tag, &hba->outstanding_tasks);
6635         spin_unlock_irqrestore(hba->host->host_lock, flags);
6636
6637         ufshcd_release(hba);
6638         blk_mq_free_request(req);
6639
6640         return err;
6641 }
6642
6643 /**
6644  * ufshcd_issue_tm_cmd - issues task management commands to controller
6645  * @hba: per adapter instance
6646  * @lun_id: LUN ID to which TM command is sent
6647  * @task_id: task ID to which the TM command is applicable
6648  * @tm_function: task management function opcode
6649  * @tm_response: task management service response return value
6650  *
6651  * Returns non-zero value on error, zero on success.
6652  */
6653 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6654                 u8 tm_function, u8 *tm_response)
6655 {
6656         struct utp_task_req_desc treq = { { 0 }, };
6657         enum utp_ocs ocs_value;
6658         int err;
6659
6660         /* Configure task request descriptor */
6661         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6662         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6663
6664         /* Configure task request UPIU */
6665         treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6666                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6667         treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6668
6669         /*
6670          * The host shall provide the same value for LUN field in the basic
6671          * header and for Input Parameter.
6672          */
6673         treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6674         treq.upiu_req.input_param2 = cpu_to_be32(task_id);
6675
6676         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6677         if (err == -ETIMEDOUT)
6678                 return err;
6679
6680         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6681         if (ocs_value != OCS_SUCCESS)
6682                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6683                                 __func__, ocs_value);
6684         else if (tm_response)
6685                 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
6686                                 MASK_TM_SERVICE_RESP;
6687         return err;
6688 }
6689
6690 /**
6691  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6692  * @hba:        per-adapter instance
6693  * @req_upiu:   upiu request
6694  * @rsp_upiu:   upiu reply
6695  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6696  * @buff_len:   descriptor size, 0 if NA
6697  * @cmd_type:   specifies the type (NOP, Query...)
6698  * @desc_op:    descriptor operation
6699  *
6700  * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6701  * Therefore, they "ride" the device management infrastructure: they use its
6702  * tag and its work queues.
6703  *
6704  * Since there is only one available tag for device management commands,
6705  * the caller is expected to hold the hba->dev_cmd.lock mutex.
6706  */
6707 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6708                                         struct utp_upiu_req *req_upiu,
6709                                         struct utp_upiu_req *rsp_upiu,
6710                                         u8 *desc_buff, int *buff_len,
6711                                         enum dev_cmd_type cmd_type,
6712                                         enum query_opcode desc_op)
6713 {
6714         struct request_queue *q = hba->cmd_queue;
6715         DECLARE_COMPLETION_ONSTACK(wait);
6716         struct request *req;
6717         struct ufshcd_lrb *lrbp;
6718         int err = 0;
6719         int tag;
6720         u8 upiu_flags;
6721
6722         down_read(&hba->clk_scaling_lock);
6723
6724         req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
6725         if (IS_ERR(req)) {
6726                 err = PTR_ERR(req);
6727                 goto out_unlock;
6728         }
6729         tag = req->tag;
6730         WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
6731
6732         if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
6733                 err = -EBUSY;
6734                 goto out;
6735         }
6736
6737         lrbp = &hba->lrb[tag];
6738         WARN_ON(lrbp->cmd);
6739         lrbp->cmd = NULL;
6740         lrbp->sense_bufflen = 0;
6741         lrbp->sense_buffer = NULL;
6742         lrbp->task_tag = tag;
6743         lrbp->lun = 0;
6744         lrbp->intr_cmd = true;
6745         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6746         hba->dev_cmd.type = cmd_type;
6747
6748         if (hba->ufs_version <= ufshci_version(1, 1))
6749                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6750         else
6751                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6752
6753         /* update the task tag in the request upiu */
6754         req_upiu->header.dword_0 |= cpu_to_be32(tag);
6755
6756         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6757
6758         /* just copy the upiu request as it is */
6759         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6760         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6761                 /* The Data Segment Area is optional depending upon the query
6762                  * function value. For WRITE DESCRIPTOR, the data segment
6763                  * follows right after the Transaction Specific Fields (TSF).
6764                  */
6765                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6766                 *buff_len = 0;
6767         }
6768
6769         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6770
6771         hba->dev_cmd.complete = &wait;
6772
6773         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
6774
6775         ufshcd_send_command(hba, tag);
6776         /*
6777          * Ignore the return value here - ufshcd_check_query_response is
6778          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6779          * Read the response directly, ignoring all errors.
6780          */
6781         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6782
6783         /* just copy the upiu response as it is */
6784         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6785         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6786                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6787                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6788                                MASK_QUERY_DATA_SEG_LEN;
6789
6790                 if (*buff_len >= resp_len) {
6791                         memcpy(desc_buff, descp, resp_len);
6792                         *buff_len = resp_len;
6793                 } else {
6794                         dev_warn(hba->dev,
6795                                  "%s: rsp size %d is bigger than buffer size %d\n",
6796                                  __func__, resp_len, *buff_len);
6797                         *buff_len = 0;
6798                         err = -EINVAL;
6799                 }
6800         }
6801         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
6802                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
6803
6804 out:
6805         blk_mq_free_request(req);
6806 out_unlock:
6807         up_read(&hba->clk_scaling_lock);
6808         return err;
6809 }
6810
6811 /**
6812  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6813  * @hba:        per-adapter instance
6814  * @req_upiu:   upiu request
6815  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
6816  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
6817  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6818  * @buff_len:   descriptor size, 0 if NA
6819  * @desc_op:    descriptor operation
6820  *
6821  * Supports UTP Transfer requests (nop and query), and UTP Task
6822  * Management requests.
6823  * It is up to the caller to fill the upiu content properly, as it will
6824  * be copied without any further input validation.
6825  */
6826 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6827                              struct utp_upiu_req *req_upiu,
6828                              struct utp_upiu_req *rsp_upiu,
6829                              int msgcode,
6830                              u8 *desc_buff, int *buff_len,
6831                              enum query_opcode desc_op)
6832 {
6833         int err;
6834         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6835         struct utp_task_req_desc treq = { { 0 }, };
6836         enum utp_ocs ocs_value;
6837         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6838
6839         switch (msgcode) {
6840         case UPIU_TRANSACTION_NOP_OUT:
6841                 cmd_type = DEV_CMD_TYPE_NOP;
6842                 fallthrough;
6843         case UPIU_TRANSACTION_QUERY_REQ:
6844                 ufshcd_hold(hba, false);
6845                 mutex_lock(&hba->dev_cmd.lock);
6846                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6847                                                    desc_buff, buff_len,
6848                                                    cmd_type, desc_op);
6849                 mutex_unlock(&hba->dev_cmd.lock);
6850                 ufshcd_release(hba);
6851
6852                 break;
6853         case UPIU_TRANSACTION_TASK_REQ:
6854                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6855                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6856
6857                 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
6858
6859                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6860                 if (err == -ETIMEDOUT)
6861                         break;
6862
6863                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6864                 if (ocs_value != OCS_SUCCESS) {
6865                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6866                                 ocs_value);
6867                         break;
6868                 }
6869
6870                 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
6871
6872                 break;
6873         default:
6874                 err = -EINVAL;
6875
6876                 break;
6877         }
6878
6879         return err;
6880 }
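/*
 * Illustrative caller sketch (hypothetical, modelled on the ufs_bsg path):
 * the caller builds the request UPIU itself and receives the raw response:
 *
 *	struct utp_upiu_req req = { }, rsp = { };
 *	int len = 0;
 *
 *	req.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
 *	err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, &len, UPIU_QUERY_OPCODE_NOP);
 */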
6881
6882 /**
6883  * ufshcd_eh_device_reset_handler - device reset handler registered to
6884  *                                    scsi layer.
6885  * @cmd: SCSI command pointer
6886  *
6887  * Returns SUCCESS/FAILED
6888  */
6889 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6890 {
6891         struct Scsi_Host *host;
6892         struct ufs_hba *hba;
6893         u32 pos;
6894         int err;
6895         u8 resp = 0xF, lun;
6896
6897         host = cmd->device->host;
6898         hba = shost_priv(host);
6899
6900         lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
6901         err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
6902         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6903                 if (!err)
6904                         err = resp;
6905                 goto out;
6906         }
6907
6908         /* clear the commands that were pending for corresponding LUN */
6909         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6910                 if (hba->lrb[pos].lun == lun) {
6911                         err = ufshcd_clear_cmd(hba, pos);
6912                         if (err)
6913                                 break;
6914                         __ufshcd_transfer_req_compl(hba, 1U << pos);
6915                 }
6916         }
6917
6918 out:
6919         hba->req_abort_count = 0;
6920         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
6921         if (!err) {
6922                 err = SUCCESS;
6923         } else {
6924                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6925                 err = FAILED;
6926         }
6927         return err;
6928 }
6929
6930 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6931 {
6932         struct ufshcd_lrb *lrbp;
6933         int tag;
6934
6935         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6936                 lrbp = &hba->lrb[tag];
6937                 lrbp->req_abort_skip = true;
6938         }
6939 }
6940
6941 /**
6942  * ufshcd_try_to_abort_task - abort a specific task
6943  * @hba: Pointer to adapter instance
6944  * @tag: Task tag/index to be aborted
6945  *
6946  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6947  * management command, and in the host controller by clearing the door-bell
6948  * register. There can be a race where the controller sends the command to the
6949  * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK
6950  * to check whether the command was really issued, and only then try to abort it.
6951  *
6952  * Returns zero on success, non-zero on failure
6953  */
6954 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6955 {
6956         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6957         int err = 0;
6958         int poll_cnt;
6959         u8 resp = 0xF;
6960         u32 reg;
6961
6962         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6963                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6964                                 UFS_QUERY_TASK, &resp);
6965                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6966                         /* cmd pending in the device */
6967                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6968                                 __func__, tag);
6969                         break;
6970                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6971                         /*
6972                          * cmd not pending in the device, check if it is
6973                          * in transition.
6974                          */
6975                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6976                                 __func__, tag);
6977                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6978                         if (reg & (1 << tag)) {
6979                                 /* sleep for max. 200us to stabilize */
6980                                 usleep_range(100, 200);
6981                                 continue;
6982                         }
6983                         /* command completed already */
6984                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6985                                 __func__, tag);
6986                         goto out;
6987                 } else {
6988                         dev_err(hba->dev,
6989                                 "%s: no response from device. tag = %d, err %d\n",
6990                                 __func__, tag, err);
6991                         if (!err)
6992                                 err = resp; /* service response error */
6993                         goto out;
6994                 }
6995         }
6996
6997         if (!poll_cnt) {
6998                 err = -EBUSY;
6999                 goto out;
7000         }
7001
7002         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7003                         UFS_ABORT_TASK, &resp);
7004         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7005                 if (!err) {
7006                         err = resp; /* service response error */
7007                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7008                                 __func__, tag, err);
7009                 }
7010                 goto out;
7011         }
7012
7013         err = ufshcd_clear_cmd(hba, tag);
7014         if (err)
7015                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7016                         __func__, tag, err);
7017
7018 out:
7019         return err;
7020 }
7021
7022 /**
7023  * ufshcd_abort - scsi host template eh_abort_handler callback
7024  * @cmd: SCSI command pointer
7025  *
7026  * Returns SUCCESS/FAILED
7027  */
7028 static int ufshcd_abort(struct scsi_cmnd *cmd)
7029 {
7030         struct Scsi_Host *host = cmd->device->host;
7031         struct ufs_hba *hba = shost_priv(host);
7032         int tag = scsi_cmd_to_rq(cmd)->tag;
7033         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7034         unsigned long flags;
7035         int err = FAILED;
7036         u32 reg;
7037
7038         WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7039
7040         ufshcd_hold(hba, false);
7041         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7042         /* If command is already aborted/completed, return FAILED. */
7043         if (!(test_bit(tag, &hba->outstanding_reqs))) {
7044                 dev_err(hba->dev,
7045                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7046                         __func__, tag, hba->outstanding_reqs, reg);
7047                 goto release;
7048         }
7049
7050         /* Print Transfer Request of aborted task */
7051         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7052
7053         /*
7054          * Print detailed info about aborted request.
7055          * As more than one request might get aborted at the same time,
7056          * print full information only for the first aborted request in order
7057          * to reduce repeated printouts. For other aborted requests only print
7058          * basic details.
7059          */
7060         scsi_print_command(cmd);
7061         if (!hba->req_abort_count) {
7062                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7063                 ufshcd_print_evt_hist(hba);
7064                 ufshcd_print_host_state(hba);
7065                 ufshcd_print_pwr_info(hba);
7066                 ufshcd_print_trs(hba, 1 << tag, true);
7067         } else {
7068                 ufshcd_print_trs(hba, 1 << tag, false);
7069         }
7070         hba->req_abort_count++;
7071
7072         if (!(reg & (1 << tag))) {
7073                 dev_err(hba->dev,
7074                         "%s: cmd was completed, but without a notifying intr, tag = %d",
7075                         __func__, tag);
7076                 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7077                 goto release;
7078         }
7079
7080         /*
7081          * Task abort to the device W-LUN is illegal. When this command
7082          * fails due to the spec violation, the next SCSI error-handling
7083          * step would be to send a LU reset which, again, is a spec
7084          * violation. To avoid these unnecessary/illegal steps, schedule
7085          * the error handler with force_reset set so it performs a full
7086          * host reset, then bail.
7087          */
7088         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7089                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7090
7091                 spin_lock_irqsave(host->host_lock, flags);
7092                 hba->force_reset = true;
7093                 ufshcd_schedule_eh_work(hba);
7094                 spin_unlock_irqrestore(host->host_lock, flags);
7095                 goto release;
7096         }
7097
7098         /* Skip task abort in case previous aborts failed and report failure */
7099         if (lrbp->req_abort_skip) {
7100                 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7101                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7102                 goto release;
7103         }
7104
7105         err = ufshcd_try_to_abort_task(hba, tag);
7106         if (err) {
7107                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7108                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7109                 err = FAILED;
7110                 goto release;
7111         }
7112
7113         lrbp->cmd = NULL;
7114         err = SUCCESS;
7115
7116 release:
7117         /* Matches the ufshcd_hold() call at the start of this function. */
7118         ufshcd_release(hba);
7119         return err;
7120 }
7121
7122 /**
7123  * ufshcd_host_reset_and_restore - reset and restore host controller
7124  * @hba: per-adapter instance
7125  *
7126  * Note that host controller reset may issue DME_RESET to
7127  * local and remote (device) UniPro stacks, and the attributes
7128  * are reset to default state.
7129  *
7130  * Returns zero on success, non-zero on failure
7131  */
7132 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7133 {
7134         int err;
7135
7136         /*
7137          * Stop the host controller and complete the requests
7138          * cleared by h/w
7139          */
7140         ufshpb_reset_host(hba);
7141         ufshcd_hba_stop(hba);
7142         hba->silence_err_logs = true;
7143         ufshcd_complete_requests(hba);
7144         hba->silence_err_logs = false;
7145
7146         /* scale up clocks to max frequency before full reinitialization */
7147         ufshcd_set_clk_freq(hba, true);
7148
7149         err = ufshcd_hba_enable(hba);
7150
7151         /* Establish the link again and restore the device */
7152         if (!err)
7153                 err = ufshcd_probe_hba(hba, false);
7154
7155         if (err)
7156                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7157         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7158         return err;
7159 }
7160
7161 /**
7162  * ufshcd_reset_and_restore - reset and re-initialize host/device
7163  * @hba: per-adapter instance
7164  *
7165  * Reset and recover device, host and re-establish link. This
7166  * is helpful to recover the communication in fatal error conditions.
7167  *
7168  * Returns zero on success, non-zero on failure
7169  */
7170 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7171 {
7172         u32 saved_err = 0;
7173         u32 saved_uic_err = 0;
7174         int err = 0;
7175         unsigned long flags;
7176         int retries = MAX_HOST_RESET_RETRIES;
7177
7178         spin_lock_irqsave(hba->host->host_lock, flags);
7179         do {
7180                 /*
7181                  * This is a fresh start; cache and clear the saved errors first,
7182                  * in case new errors are generated during reset and restore.
7183                  */
7184                 saved_err |= hba->saved_err;
7185                 saved_uic_err |= hba->saved_uic_err;
7186                 hba->saved_err = 0;
7187                 hba->saved_uic_err = 0;
7188                 hba->force_reset = false;
7189                 hba->ufshcd_state = UFSHCD_STATE_RESET;
7190                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7191
7192                 /* Reset the attached device */
7193                 ufshcd_device_reset(hba);
7194
7195                 err = ufshcd_host_reset_and_restore(hba);
7196
7197                 spin_lock_irqsave(hba->host->host_lock, flags);
7198                 if (err)
7199                         continue;
7200                 /* Do not exit unless operational or dead */
7201                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7202                     hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7203                     hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7204                         err = -EAGAIN;
7205         } while (err && --retries);
7206
7207         /*
7208          * Inform the SCSI mid-layer that we did a reset and allow it to
7209          * handle Unit Attention properly.
7210          */
7211         scsi_report_bus_reset(hba->host, 0);
7212         if (err) {
7213                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7214                 hba->saved_err |= saved_err;
7215                 hba->saved_uic_err |= saved_uic_err;
7216         }
7217         spin_unlock_irqrestore(hba->host->host_lock, flags);
7218
7219         return err;
7220 }
7221
7222 /**
7223  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7224  * @cmd: SCSI command pointer
7225  *
7226  * Returns SUCCESS/FAILED
7227  */
7228 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7229 {
7230         int err = SUCCESS;
7231         unsigned long flags;
7232         struct ufs_hba *hba;
7233
7234         hba = shost_priv(cmd->device->host);
7235
7236         spin_lock_irqsave(hba->host->host_lock, flags);
7237         hba->force_reset = true;
7238         ufshcd_schedule_eh_work(hba);
7239         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7240         spin_unlock_irqrestore(hba->host->host_lock, flags);
7241
7242         flush_work(&hba->eh_work);
7243
7244         spin_lock_irqsave(hba->host->host_lock, flags);
7245         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7246                 err = FAILED;
7247         spin_unlock_irqrestore(hba->host->host_lock, flags);
7248
7249         return err;
7250 }
7251
7252 /**
7253  * ufshcd_get_max_icc_level - calculate the ICC level
7254  * @sup_curr_uA: max. current supported by the regulator
7255  * @start_scan: row at the desc table to start scan from
7256  * @buff: power descriptor buffer
7257  *
7258  * Returns calculated max ICC level for specific regulator
7259  */
7260 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7261 {
7262         int i;
7263         int curr_uA;
7264         u16 data;
7265         u16 unit;
7266
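        /*
         * Scan from the highest ICC level downwards and pick the first level
         * whose required current fits within the regulator's limit.
         */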
7267         for (i = start_scan; i >= 0; i--) {
7268                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
7269                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7270                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7271                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
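                /*
                 * Normalize the descriptor value to microamps before comparing,
                 * e.g. UFSHCD_MILI_AMP: 2 mA -> 2000 uA.
                 */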
7272                 switch (unit) {
7273                 case UFSHCD_NANO_AMP:
7274                         curr_uA = curr_uA / 1000;
7275                         break;
7276                 case UFSHCD_MILI_AMP:
7277                         curr_uA = curr_uA * 1000;
7278                         break;
7279                 case UFSHCD_AMP:
7280                         curr_uA = curr_uA * 1000 * 1000;
7281                         break;
7282                 case UFSHCD_MICRO_AMP:
7283                 default:
7284                         break;
7285                 }
7286                 if (sup_curr_uA >= curr_uA)
7287                         break;
7288         }
7289         if (i < 0) {
7290                 i = 0;
7291                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7292         }
7293
7294         return (u32)i;
7295 }
7296
7297 /**
7298  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7299  * In case regulators are not initialized we'll return 0
7300  * @hba: per-adapter instance
7301  * @desc_buf: power descriptor buffer to extract ICC levels from.
7302  * @len: length of desc_buf
7303  *
7304  * Returns calculated ICC level
7305  */
7306 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7307                                                         u8 *desc_buf, int len)
7308 {
7309         u32 icc_level = 0;
7310
7311         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7312                                                 !hba->vreg_info.vccq2) {
7313                 dev_err(hba->dev,
7314                         "%s: Regulator capability was not set, actvIccLevel=%d",
7315                                                         __func__, icc_level);
7316                 goto out;
7317         }
7318
7319         if (hba->vreg_info.vcc->max_uA)
7320                 icc_level = ufshcd_get_max_icc_level(
7321                                 hba->vreg_info.vcc->max_uA,
7322                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7323                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7324
7325         if (hba->vreg_info.vccq->max_uA)
7326                 icc_level = ufshcd_get_max_icc_level(
7327                                 hba->vreg_info.vccq->max_uA,
7328                                 icc_level,
7329                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7330
7331         if (hba->vreg_info.vccq2->max_uA)
7332                 icc_level = ufshcd_get_max_icc_level(
7333                                 hba->vreg_info.vccq2->max_uA,
7334                                 icc_level,
7335                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7336 out:
7337         return icc_level;
7338 }
7339
7340 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7341 {
7342         int ret;
7343         int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7344         u8 *desc_buf;
7345         u32 icc_level;
7346
7347         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7348         if (!desc_buf)
7349                 return;
7350
7351         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7352                                      desc_buf, buff_len);
7353         if (ret) {
7354                 dev_err(hba->dev,
7355                         "%s: Failed reading power descriptor.len = %d ret = %d",
7356                         __func__, buff_len, ret);
7357                 goto out;
7358         }
7359
7360         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7361                                                          buff_len);
7362         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7363
7364         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7365                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7366
7367         if (ret)
7368                 dev_err(hba->dev,
7369                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7370                         __func__, icc_level, ret);
7371
7372 out:
7373         kfree(desc_buf);
7374 }
7375
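/*
 * Enable block layer runtime PM for a W-LU and, when the LU advertises
 * autosuspend support, apply the default autosuspend delay.
 */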
7376 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7377 {
7378         scsi_autopm_get_device(sdev);
7379         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7380         if (sdev->rpm_autosuspend)
7381                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7382                                                  RPM_AUTOSUSPEND_DELAY_MS);
7383         scsi_autopm_put_device(sdev);
7384 }
7385
7386 /**
7387  * ufshcd_scsi_add_wlus - Adds required W-LUs
7388  * @hba: per-adapter instance
7389  *
7390  * UFS device specification requires the UFS devices to support 4 well known
7391  * logical units:
7392  *      "REPORT_LUNS" (address: 01h)
7393  *      "UFS Device" (address: 50h)
7394  *      "RPMB" (address: 44h)
7395  *      "BOOT" (address: 30h)
7396  * UFS device's power management needs to be controlled by "POWER CONDITION"
7397  * field of the SSU (START STOP UNIT) command. But this "power condition" field
7398  * will take effect only when it is sent to the "UFS device" well known logical
7399  * unit, hence we require the scsi_device instance to represent this logical unit in
7400  * order for the UFS host driver to send the SSU command for power management.
7401  *
7402  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7403  * Block) LU so that user space processes can control this LU. User space may also
7404  * want to have access to BOOT LU.
7405  *
7406  * This function adds scsi device instances for each of the well known LUs
7407  * (except "REPORT LUNS" LU).
7408  *
7409  * Returns zero on success (all required W-LUs are added successfully),
7410  * non-zero error value on failure (if it failed to add any of the required W-LUs).
7411  */
7412 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7413 {
7414         int ret = 0;
7415         struct scsi_device *sdev_boot;
7416
7417         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7418                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7419         if (IS_ERR(hba->sdev_ufs_device)) {
7420                 ret = PTR_ERR(hba->sdev_ufs_device);
7421                 hba->sdev_ufs_device = NULL;
7422                 goto out;
7423         }
7424         scsi_device_put(hba->sdev_ufs_device);
7425
7426         hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7427                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7428         if (IS_ERR(hba->sdev_rpmb)) {
7429                 ret = PTR_ERR(hba->sdev_rpmb);
7430                 goto remove_sdev_ufs_device;
7431         }
7432         ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7433         scsi_device_put(hba->sdev_rpmb);
7434
7435         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7436                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7437         if (IS_ERR(sdev_boot)) {
7438                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7439         } else {
7440                 ufshcd_blk_pm_runtime_init(sdev_boot);
7441                 scsi_device_put(sdev_boot);
7442         }
7443         goto out;
7444
7445 remove_sdev_ufs_device:
7446         scsi_remove_device(hba->sdev_ufs_device);
7447 out:
7448         return ret;
7449 }
7450
7451 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7452 {
7453         struct ufs_dev_info *dev_info = &hba->dev_info;
7454         u8 lun;
7455         u32 d_lu_wb_buf_alloc;
7456         u32 ext_ufs_feature;
7457
7458         if (!ufshcd_is_wb_allowed(hba))
7459                 return;
7460         /*
7461          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7462          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7463          * enabled
7464          */
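        /* wspecversion is BCD-coded, e.g. 0x310 == UFS 3.1, 0x220 == UFS 2.2 */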
7465         if (!(dev_info->wspecversion >= 0x310 ||
7466               dev_info->wspecversion == 0x220 ||
7467              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7468                 goto wb_disabled;
7469
7470         if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7471             DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7472                 goto wb_disabled;
7473
7474         ext_ufs_feature = get_unaligned_be32(desc_buf +
7475                                         DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7476
7477         if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
7478                 goto wb_disabled;
7479
7480         /*
7481          * WB may be supported but not configured while provisioning. The spec
7482          * says that, in dedicated WB buffer mode, at most one LUN would
7483          * have a WB buffer configured.
7484          */
7485         dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7486
7487         dev_info->b_presrv_uspc_en =
7488                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7489
7490         if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
7491                 if (!get_unaligned_be32(desc_buf +
7492                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
7493                         goto wb_disabled;
7494         } else {
7495                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7496                         d_lu_wb_buf_alloc = 0;
7497                         ufshcd_read_unit_desc_param(hba,
7498                                         lun,
7499                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7500                                         (u8 *)&d_lu_wb_buf_alloc,
7501                                         sizeof(d_lu_wb_buf_alloc));
7502                         if (d_lu_wb_buf_alloc) {
7503                                 dev_info->wb_dedicated_lu = lun;
7504                                 break;
7505                         }
7506                 }
7507
7508                 if (!d_lu_wb_buf_alloc)
7509                         goto wb_disabled;
7510         }
7511         return;
7512
7513 wb_disabled:
7514         hba->caps &= ~UFSHCD_CAP_WB_EN;
7515 }
7516
7517 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
7518 {
7519         struct ufs_dev_info *dev_info = &hba->dev_info;
7520         u32 ext_ufs_feature;
7521         u8 mask = 0;
7522
7523         if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
7524                 return;
7525
7526         ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7527
7528         if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
7529                 mask |= MASK_EE_TOO_LOW_TEMP;
7530
7531         if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
7532                 mask |= MASK_EE_TOO_HIGH_TEMP;
7533
7534         if (mask) {
7535                 ufshcd_enable_ee(hba, mask);
7536                 ufs_hwmon_probe(hba, mask);
7537         }
7538 }
7539
7540 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7541 {
7542         struct ufs_dev_fix *f;
7543         struct ufs_dev_info *dev_info = &hba->dev_info;
7544
7545         if (!fixups)
7546                 return;
7547
7548         for (f = fixups; f->quirk; f++) {
7549                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7550                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
7551                      ((dev_info->model &&
7552                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7553                       !strcmp(f->model, UFS_ANY_MODEL)))
7554                         hba->dev_quirks |= f->quirk;
7555         }
7556 }
7557 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7558
7559 static void ufs_fixup_device_setup(struct ufs_hba *hba)
7560 {
7561         /* fix by general quirk table */
7562         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7563
7564         /* allow vendors to fix quirks */
7565         ufshcd_vops_fixup_dev_quirks(hba);
7566 }
7567
7568 static int ufs_get_device_desc(struct ufs_hba *hba)
7569 {
7570         int err;
7571         u8 model_index;
7572         u8 b_ufs_feature_sup;
7573         u8 *desc_buf;
7574         struct ufs_dev_info *dev_info = &hba->dev_info;
7575
7576         desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7577         if (!desc_buf) {
7578                 err = -ENOMEM;
7579                 goto out;
7580         }
7581
7582         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7583                                      hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7584         if (err) {
7585                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7586                         __func__, err);
7587                 goto out;
7588         }
7589
7590         /*
7591          * getting vendor (manufacturerID) and Bank Index in big endian
7592          * format
7593          */
7594         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7595                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7596
7597         /* getting Specification Version in big endian format */
7598         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7599                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7600         b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
7601
7602         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7603
7604         if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7605             (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
7606                 bool hpb_en = false;
7607
7608                 ufshpb_get_dev_info(hba, desc_buf);
7609
7610                 if (!ufshpb_is_legacy(hba))
7611                         err = ufshcd_query_flag_retry(hba,
7612                                                       UPIU_QUERY_OPCODE_READ_FLAG,
7613                                                       QUERY_FLAG_IDN_HPB_EN, 0,
7614                                                       &hpb_en);
7615
7616                 if (ufshpb_is_legacy(hba) || (!err && hpb_en))
7617                         dev_info->hpb_enabled = true;
7618         }
7619
7620         err = ufshcd_read_string_desc(hba, model_index,
7621                                       &dev_info->model, SD_ASCII_STD);
7622         if (err < 0) {
7623                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7624                         __func__, err);
7625                 goto out;
7626         }
7627
7628         hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
7629                 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
7630
7631         ufs_fixup_device_setup(hba);
7632
7633         ufshcd_wb_probe(hba, desc_buf);
7634
7635         ufshcd_temp_notif_probe(hba, desc_buf);
7636
7637         /*
7638          * ufshcd_read_string_desc returns the size of the string on
7639          * success, so reset the error value here.
7640          */
7641         err = 0;
7642
7643 out:
7644         kfree(desc_buf);
7645         return err;
7646 }
7647
7648 static void ufs_put_device_desc(struct ufs_hba *hba)
7649 {
7650         struct ufs_dev_info *dev_info = &hba->dev_info;
7651
7652         kfree(dev_info->model);
7653         dev_info->model = NULL;
7654 }
7655
7656 /**
7657  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7658  * @hba: per-adapter instance
7659  *
7660  * PA_TActivate parameter can be tuned manually if UniPro version is less than
7661  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7662  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7663  * the hibern8 exit latency.
7664  *
7665  * Returns zero on success, non-zero error value on failure.
7666  */
7667 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7668 {
7669         int ret = 0;
7670         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7671
7672         ret = ufshcd_dme_peer_get(hba,
7673                                   UIC_ARG_MIB_SEL(
7674                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7675                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7676                                   &peer_rx_min_activatetime);
7677         if (ret)
7678                 goto out;
7679
7680         /* make sure proper unit conversion is applied */
7681         tuned_pa_tactivate =
7682                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7683                  / PA_TACTIVATE_TIME_UNIT_US);
7684         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7685                              tuned_pa_tactivate);
7686
7687 out:
7688         return ret;
7689 }
7690
7691 /**
7692  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7693  * @hba: per-adapter instance
7694  *
7695  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7696  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7697  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7698  * This optimal value can help reduce the hibern8 exit latency.
7699  *
7700  * Returns zero on success, non-zero error value on failure.
7701  */
7702 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7703 {
7704         int ret = 0;
7705         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7706         u32 max_hibern8_time, tuned_pa_hibern8time;
7707
7708         ret = ufshcd_dme_get(hba,
7709                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7710                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7711                                   &local_tx_hibern8_time_cap);
7712         if (ret)
7713                 goto out;
7714
7715         ret = ufshcd_dme_peer_get(hba,
7716                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7717                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7718                                   &peer_rx_hibern8_time_cap);
7719         if (ret)
7720                 goto out;
7721
7722         max_hibern8_time = max(local_tx_hibern8_time_cap,
7723                                peer_rx_hibern8_time_cap);
7724         /* make sure proper unit conversion is applied */
7725         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7726                                 / PA_HIBERN8_TIME_UNIT_US);
7727         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7728                              tuned_pa_hibern8time);
7729 out:
7730         return ret;
7731 }
7732
7733 /**
7734  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7735  * less than device PA_TACTIVATE time.
7736  * @hba: per-adapter instance
7737  *
7738  * Some UFS devices require host PA_TACTIVATE to be lower than device
7739  * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7740  * for such devices.
7741  *
7742  * Returns zero on success, non-zero error value on failure.
7743  */
7744 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7745 {
7746         int ret = 0;
7747         u32 granularity, peer_granularity;
7748         u32 pa_tactivate, peer_pa_tactivate;
7749         u32 pa_tactivate_us, peer_pa_tactivate_us;
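        /*
         * PA_GRANULARITY values 1..6 select a time-unit step of 1, 4, 8, 16,
         * 32 or 100 us; the table below, indexed by (granularity - 1),
         * converts PA_TACTIVATE units into microseconds. For example,
         * PA_TACTIVATE == 3 at granularity 6 (100 us) means 300 us.
         */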
7750         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7751
7752         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7753                                   &granularity);
7754         if (ret)
7755                 goto out;
7756
7757         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7758                                   &peer_granularity);
7759         if (ret)
7760                 goto out;
7761
7762         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7763             (granularity > PA_GRANULARITY_MAX_VAL)) {
7764                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7765                         __func__, granularity);
7766                 return -EINVAL;
7767         }
7768
7769         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7770             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7771                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7772                         __func__, peer_granularity);
7773                 return -EINVAL;
7774         }
7775
7776         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7777         if (ret)
7778                 goto out;
7779
7780         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7781                                   &peer_pa_tactivate);
7782         if (ret)
7783                 goto out;
7784
7785         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7786         peer_pa_tactivate_us = peer_pa_tactivate *
7787                              gran_to_us_table[peer_granularity - 1];
7788
7789         if (pa_tactivate_us > peer_pa_tactivate_us) {
7790                 u32 new_peer_pa_tactivate;
7791
7792                 new_peer_pa_tactivate = pa_tactivate_us /
7793                                       gran_to_us_table[peer_granularity - 1];
7794                 new_peer_pa_tactivate++;
7795                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7796                                           new_peer_pa_tactivate);
7797         }
7798
7799 out:
7800         return ret;
7801 }
7802
7803 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7804 {
7805         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7806                 ufshcd_tune_pa_tactivate(hba);
7807                 ufshcd_tune_pa_hibern8time(hba);
7808         }
7809
7810         ufshcd_vops_apply_dev_quirks(hba);
7811
7812         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7813                 /* set 1ms timeout for PA_TACTIVATE */
7814                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7815
7816         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7817                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7818 }
7819
7820 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7821 {
7822         hba->ufs_stats.hibern8_exit_cnt = 0;
7823         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7824         hba->req_abort_count = 0;
7825 }
7826
7827 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7828 {
7829         int err;
7830         size_t buff_len;
7831         u8 *desc_buf;
7832
7833         buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7834         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7835         if (!desc_buf) {
7836                 err = -ENOMEM;
7837                 goto out;
7838         }
7839
7840         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7841                                      desc_buf, buff_len);
7842         if (err) {
7843                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7844                                 __func__, err);
7845                 goto out;
7846         }
7847
7848         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7849                 hba->dev_info.max_lu_supported = 32;
7850         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7851                 hba->dev_info.max_lu_supported = 8;
7852
7853         if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
7854                 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
7855                 ufshpb_get_geo_info(hba, desc_buf);
7856
7857 out:
7858         kfree(desc_buf);
7859         return err;
7860 }
7861
7862 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7863         {19200000, REF_CLK_FREQ_19_2_MHZ},
7864         {26000000, REF_CLK_FREQ_26_MHZ},
7865         {38400000, REF_CLK_FREQ_38_4_MHZ},
7866         {52000000, REF_CLK_FREQ_52_MHZ},
7867         {0, REF_CLK_FREQ_INVAL},
7868 };
7869
7870 static enum ufs_ref_clk_freq
7871 ufs_get_bref_clk_from_hz(unsigned long freq)
7872 {
7873         int i;
7874
7875         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7876                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7877                         return ufs_ref_clk_freqs[i].val;
7878
7879         return REF_CLK_FREQ_INVAL;
7880 }
7881
7882 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7883 {
7884         unsigned long freq;
7885
7886         freq = clk_get_rate(refclk);
7887
7888         hba->dev_ref_clk_freq =
7889                 ufs_get_bref_clk_from_hz(freq);
7890
7891         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7892                 dev_err(hba->dev,
7893                         "invalid ref_clk setting = %ld\n", freq);
7894 }
7895
7896 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7897 {
7898         int err;
7899         u32 ref_clk;
7900         u32 freq = hba->dev_ref_clk_freq;
7901
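        /*
         * bRefClkFreq holds an enum index (REF_CLK_FREQ_*), not a raw Hz
         * value; read the device's current setting and rewrite it only if
         * it differs from the frequency the host parsed from "ref_clk".
         */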
7902         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7903                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7904
7905         if (err) {
7906                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7907                         err);
7908                 goto out;
7909         }
7910
7911         if (ref_clk == freq)
7912                 goto out; /* nothing to update */
7913
7914         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7915                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7916
7917         if (err) {
7918                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7919                         ufs_ref_clk_freqs[freq].freq_hz);
7920                 goto out;
7921         }
7922
7923         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7924                         ufs_ref_clk_freqs[freq].freq_hz);
7925
7926 out:
7927         return err;
7928 }
7929
7930 static int ufshcd_device_params_init(struct ufs_hba *hba)
7931 {
7932         bool flag;
7933         int ret, i;
7934
7935         /* Init device descriptor sizes */
7936         for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7937                 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7938
7939         /* Init UFS geometry descriptor related parameters */
7940         ret = ufshcd_device_geo_params_init(hba);
7941         if (ret)
7942                 goto out;
7943
7944         /* Check and apply UFS device quirks */
7945         ret = ufs_get_device_desc(hba);
7946         if (ret) {
7947                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7948                         __func__, ret);
7949                 goto out;
7950         }
7951
7952         ufshcd_get_ref_clk_gating_wait(hba);
7953
7954         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7955                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7956                 hba->dev_info.f_power_on_wp_en = flag;
7957
7958         /* Probe maximum power mode co-supported by both UFS host and device */
7959         if (ufshcd_get_max_pwr_mode(hba))
7960                 dev_err(hba->dev,
7961                         "%s: Failed getting max supported power mode\n",
7962                         __func__);
7963 out:
7964         return ret;
7965 }
7966
7967 /**
7968  * ufshcd_add_lus - probe and add UFS logical units
7969  * @hba: per-adapter instance
7970  */
7971 static int ufshcd_add_lus(struct ufs_hba *hba)
7972 {
7973         int ret;
7974
7975         /* Add required well known logical units to scsi mid layer */
7976         ret = ufshcd_scsi_add_wlus(hba);
7977         if (ret)
7978                 goto out;
7979
7980         /* Initialize devfreq after UFS device is detected */
7981         if (ufshcd_is_clkscaling_supported(hba)) {
7982                 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7983                         &hba->pwr_info,
7984                         sizeof(struct ufs_pa_layer_attr));
7985                 hba->clk_scaling.saved_pwr_info.is_valid = true;
7986                 hba->clk_scaling.is_allowed = true;
7987
7988                 ret = ufshcd_devfreq_init(hba);
7989                 if (ret)
7990                         goto out;
7991
7992                 hba->clk_scaling.is_enabled = true;
7993                 ufshcd_init_clk_scaling_sysfs(hba);
7994         }
7995
7996         ufs_bsg_probe(hba);
7997         ufshpb_init(hba);
7998         scsi_scan_host(hba->host);
7999         pm_runtime_put_sync(hba->dev);
8000
8001 out:
8002         return ret;
8003 }
8004
8005 /**
8006  * ufshcd_probe_hba - probe hba to detect device and initialize it
8007  * @hba: per-adapter instance
8008  * @init_dev_params: whether or not to call ufshcd_device_params_init().
8009  *
8010  * Execute link-startup and verify device initialization
8011  */
8012 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8013 {
8014         int ret;
8015         unsigned long flags;
8016         ktime_t start = ktime_get();
8017
8018         hba->ufshcd_state = UFSHCD_STATE_RESET;
8019
8020         ret = ufshcd_link_startup(hba);
8021         if (ret)
8022                 goto out;
8023
8024         if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8025                 goto out;
8026
8027         /* Debug counters initialization */
8028         ufshcd_clear_dbg_ufs_stats(hba);
8029
8030         /* UniPro link is active now */
8031         ufshcd_set_link_active(hba);
8032
8033         /* Verify device initialization by sending NOP OUT UPIU */
8034         ret = ufshcd_verify_dev_init(hba);
8035         if (ret)
8036                 goto out;
8037
8038         /* Initiate UFS initialization, and wait until completion */
8039         ret = ufshcd_complete_dev_init(hba);
8040         if (ret)
8041                 goto out;
8042
8043         /*
8044          * Initialize UFS device parameters used by the driver; these
8045          * parameters are associated with UFS descriptors.
8046          */
8047         if (init_dev_params) {
8048                 ret = ufshcd_device_params_init(hba);
8049                 if (ret)
8050                         goto out;
8051         }
8052
8053         ufshcd_tune_unipro_params(hba);
8054
8055         /* UFS device is also active now */
8056         ufshcd_set_ufs_dev_active(hba);
8057         ufshcd_force_reset_auto_bkops(hba);
8058
8059         /* Gear up to HS gear if supported */
8060         if (hba->max_pwr_info.is_valid) {
8061                 /*
8062                  * Set the right value to bRefClkFreq before attempting to
8063                  * switch to HS gears.
8064                  */
8065                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8066                         ufshcd_set_dev_ref_clk(hba);
8067                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8068                 if (ret) {
8069                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8070                                         __func__, ret);
8071                         goto out;
8072                 }
8073                 ufshcd_print_pwr_info(hba);
8074         }
8075
8076         /*
8077          * bActiveICCLevel is volatile for UFS devices (as per the latest v2.1
8078          * spec) and for removable UFS cards as well, hence always set the
8079          * parameter. Note: the error handler may issue a device reset, which
8080          * resets bActiveICCLevel as well, so it is always safe to set this here.
8081          */
8082         ufshcd_set_active_icc_lvl(hba);
8083
8084         ufshcd_wb_config(hba);
8085         if (hba->ee_usr_mask)
8086                 ufshcd_write_ee_control(hba);
8087         /* Enable Auto-Hibernate if configured */
8088         ufshcd_auto_hibern8_enable(hba);
8089
8090         ufshpb_reset(hba);
8091 out:
8092         spin_lock_irqsave(hba->host->host_lock, flags);
8093         if (ret)
8094                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8095         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8096                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8097         spin_unlock_irqrestore(hba->host->host_lock, flags);
8098
8099         trace_ufshcd_init(dev_name(hba->dev), ret,
8100                 ktime_to_us(ktime_sub(ktime_get(), start)),
8101                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8102         return ret;
8103 }
8104
8105 /**
8106  * ufshcd_async_scan - asynchronous execution for probing hba
8107  * @data: data pointer to pass to this function
8108  * @cookie: cookie data
8109  */
8110 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8111 {
8112         struct ufs_hba *hba = (struct ufs_hba *)data;
8113         int ret;
8114
8115         down(&hba->host_sem);
8116         /* Initialize hba, detect and initialize UFS device */
8117         ret = ufshcd_probe_hba(hba, true);
8118         up(&hba->host_sem);
8119         if (ret)
8120                 goto out;
8121
8122         /* Probe and add UFS logical units  */
8123         ret = ufshcd_add_lus(hba);
8124 out:
8125         /*
8126          * If we failed to initialize the device or the device is not
8127          * present, turn off the power/clocks etc.
8128          */
8129         if (ret) {
8130                 pm_runtime_put_sync(hba->dev);
8131                 ufshcd_hba_exit(hba);
8132         }
8133 }
8134
8135 static const struct attribute_group *ufshcd_driver_groups[] = {
8136         &ufs_sysfs_unit_descriptor_group,
8137         &ufs_sysfs_lun_attributes_group,
8138 #ifdef CONFIG_SCSI_UFS_HPB
8139         &ufs_sysfs_hpb_stat_group,
8140         &ufs_sysfs_hpb_param_group,
8141 #endif
8142         NULL,
8143 };
8144
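/*
 * Default variant parameters: devfreq polls every 100 ms; with the ondemand
 * governor, clocks scale up when load exceeds 70% and scale back down once
 * load falls more than 5% below that threshold.
 */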
8145 static struct ufs_hba_variant_params ufs_hba_vps = {
8146         .hba_enable_delay_us            = 1000,
8147         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
8148         .devfreq_profile.polling_ms     = 100,
8149         .devfreq_profile.target         = ufshcd_devfreq_target,
8150         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8151         .ondemand_data.upthreshold      = 70,
8152         .ondemand_data.downdifferential = 5,
8153 };
8154
8155 static struct scsi_host_template ufshcd_driver_template = {
8156         .module                 = THIS_MODULE,
8157         .name                   = UFSHCD,
8158         .proc_name              = UFSHCD,
8159         .queuecommand           = ufshcd_queuecommand,
8160         .slave_alloc            = ufshcd_slave_alloc,
8161         .slave_configure        = ufshcd_slave_configure,
8162         .slave_destroy          = ufshcd_slave_destroy,
8163         .change_queue_depth     = ufshcd_change_queue_depth,
8164         .eh_abort_handler       = ufshcd_abort,
8165         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8166         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
8167         .this_id                = -1,
8168         .sg_tablesize           = SG_ALL,
8169         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
8170         .can_queue              = UFSHCD_CAN_QUEUE,
8171         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
8172         .max_host_blocked       = 1,
8173         .track_queue_depth      = 1,
8174         .sdev_groups            = ufshcd_driver_groups,
8175         .dma_boundary           = PAGE_SIZE - 1,
8176         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
8177 };
8178
8179 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8180                                    int ua)
8181 {
8182         int ret;
8183
8184         if (!vreg)
8185                 return 0;
8186
8187         /*
8188          * The "set_load" operation is only required on those regulators
8189          * which have a current limitation configured. Otherwise a
8190          * zero max_uA may cause unexpected behavior when the regulator is
8191          * enabled or set to high power mode.
8192          */
8193         if (!vreg->max_uA)
8194                 return 0;
8195
8196         ret = regulator_set_load(vreg->reg, ua);
8197         if (ret < 0) {
8198                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8199                                 __func__, vreg->name, ua, ret);
8200         }
8201
8202         return ret;
8203 }
8204
8205 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8206                                          struct ufs_vreg *vreg)
8207 {
8208         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8209 }
8210
8211 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8212                                          struct ufs_vreg *vreg)
8213 {
8214         if (!vreg)
8215                 return 0;
8216
8217         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8218 }
8219
8220 static int ufshcd_config_vreg(struct device *dev,
8221                 struct ufs_vreg *vreg, bool on)
8222 {
8223         int ret = 0;
8224         struct regulator *reg;
8225         const char *name;
8226         int min_uV, uA_load;
8227
8228         BUG_ON(!vreg);
8229
8230         reg = vreg->reg;
8231         name = vreg->name;
8232
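        /* only regulators with voltage control need load/voltage configuration */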
8233         if (regulator_count_voltages(reg) > 0) {
8234                 uA_load = on ? vreg->max_uA : 0;
8235                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8236                 if (ret)
8237                         goto out;
8238
8239                 if (vreg->min_uV && vreg->max_uV) {
8240                         min_uV = on ? vreg->min_uV : 0;
8241                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8242                         if (ret)
8243                                 dev_err(dev,
8244                                         "%s: %s set voltage failed, err=%d\n",
8245                                         __func__, name, ret);
8246                 }
8247         }
8248 out:
8249         return ret;
8250 }
8251
8252 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8253 {
8254         int ret = 0;
8255
8256         if (!vreg || vreg->enabled)
8257                 goto out;
8258
8259         ret = ufshcd_config_vreg(dev, vreg, true);
8260         if (!ret)
8261                 ret = regulator_enable(vreg->reg);
8262
8263         if (!ret)
8264                 vreg->enabled = true;
8265         else
8266                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8267                                 __func__, vreg->name, ret);
8268 out:
8269         return ret;
8270 }
8271
8272 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8273 {
8274         int ret = 0;
8275
8276         if (!vreg || !vreg->enabled || vreg->always_on)
8277                 goto out;
8278
8279         ret = regulator_disable(vreg->reg);
8280
8281         if (!ret) {
8282                 /* ignore errors on applying disable config */
8283                 ufshcd_config_vreg(dev, vreg, false);
8284                 vreg->enabled = false;
8285         } else {
8286                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8287                                 __func__, vreg->name, ret);
8288         }
8289 out:
8290         return ret;
8291 }
8292
8293 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8294 {
8295         int ret = 0;
8296         struct device *dev = hba->dev;
8297         struct ufs_vreg_info *info = &hba->vreg_info;
8298
8299         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8300         if (ret)
8301                 goto out;
8302
8303         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8304         if (ret)
8305                 goto out;
8306
8307         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8308
8309 out:
8310         if (ret) {
8311                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8312                 ufshcd_toggle_vreg(dev, info->vccq, false);
8313                 ufshcd_toggle_vreg(dev, info->vcc, false);
8314         }
8315         return ret;
8316 }
8317
8318 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8319 {
8320         struct ufs_vreg_info *info = &hba->vreg_info;
8321
8322         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8323 }
8324
8325 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8326 {
8327         int ret = 0;
8328
8329         if (!vreg)
8330                 goto out;
8331
8332         vreg->reg = devm_regulator_get(dev, vreg->name);
8333         if (IS_ERR(vreg->reg)) {
8334                 ret = PTR_ERR(vreg->reg);
8335                 dev_err(dev, "%s: %s get failed, err=%d\n",
8336                                 __func__, vreg->name, ret);
8337         }
8338 out:
8339         return ret;
8340 }
8341
8342 static int ufshcd_init_vreg(struct ufs_hba *hba)
8343 {
8344         int ret = 0;
8345         struct device *dev = hba->dev;
8346         struct ufs_vreg_info *info = &hba->vreg_info;
8347
8348         ret = ufshcd_get_vreg(dev, info->vcc);
8349         if (ret)
8350                 goto out;
8351
8352         ret = ufshcd_get_vreg(dev, info->vccq);
8353         if (!ret)
8354                 ret = ufshcd_get_vreg(dev, info->vccq2);
8355 out:
8356         return ret;
8357 }
8358
8359 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8360 {
8361         struct ufs_vreg_info *info = &hba->vreg_info;
8362
8363         if (info)
8364                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8365
8366         return 0;
8367 }
8368
8369 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8370 {
8371         int ret = 0;
8372         struct ufs_clk_info *clki;
8373         struct list_head *head = &hba->clk_list_head;
8374         unsigned long flags;
8375         ktime_t start = ktime_get();
8376         bool clk_state_changed = false;
8377
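        /*
         * Toggle every clock in the list, but skip clocks that must stay on
         * to keep an active link alive; vendor hooks run before and after
         * the change.
         */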
8378         if (list_empty(head))
8379                 goto out;
8380
8381         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8382         if (ret)
8383                 return ret;
8384
8385         list_for_each_entry(clki, head, list) {
8386                 if (!IS_ERR_OR_NULL(clki->clk)) {
8387                         /*
8388                          * Don't disable clocks which are needed
8389                          * to keep the link active.
8390                          */
8391                         if (ufshcd_is_link_active(hba) &&
8392                             clki->keep_link_active)
8393                                 continue;
8394
8395                         clk_state_changed = on ^ clki->enabled;
8396                         if (on && !clki->enabled) {
8397                                 ret = clk_prepare_enable(clki->clk);
8398                                 if (ret) {
8399                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8400                                                 __func__, clki->name, ret);
8401                                         goto out;
8402                                 }
8403                         } else if (!on && clki->enabled) {
8404                                 clk_disable_unprepare(clki->clk);
8405                         }
8406                         clki->enabled = on;
8407                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8408                                         clki->name, on ? "en" : "dis");
8409                 }
8410         }
8411
8412         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8413         if (ret)
8414                 return ret;
8415
8416 out:
8417         if (ret) {
8418                 list_for_each_entry(clki, head, list) {
8419                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8420                                 clk_disable_unprepare(clki->clk);
8421                 }
8422         } else if (!ret && on) {
8423                 spin_lock_irqsave(hba->host->host_lock, flags);
8424                 hba->clk_gating.state = CLKS_ON;
8425                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8426                                         hba->clk_gating.state);
8427                 spin_unlock_irqrestore(hba->host->host_lock, flags);
8428         }
8429
8430         if (clk_state_changed)
8431                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8432                         (on ? "on" : "off"),
8433                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8434         return ret;
8435 }
8436
8437 static int ufshcd_init_clocks(struct ufs_hba *hba)
8438 {
8439         int ret = 0;
8440         struct ufs_clk_info *clki;
8441         struct device *dev = hba->dev;
8442         struct list_head *head = &hba->clk_list_head;
8443
8444         if (list_empty(head))
8445                 goto out;
8446
8447         list_for_each_entry(clki, head, list) {
8448                 if (!clki->name)
8449                         continue;
8450
8451                 clki->clk = devm_clk_get(dev, clki->name);
8452                 if (IS_ERR(clki->clk)) {
8453                         ret = PTR_ERR(clki->clk);
8454                         dev_err(dev, "%s: %s clk get failed, %d\n",
8455                                         __func__, clki->name, ret);
8456                         goto out;
8457                 }
8458
8459                 /*
8460                  * Parse device ref clk freq as per device tree "ref_clk".
8461                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8462                  * in ufshcd_alloc_host().
8463                  */
8464                 if (!strcmp(clki->name, "ref_clk"))
8465                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8466
8467                 if (clki->max_freq) {
8468                         ret = clk_set_rate(clki->clk, clki->max_freq);
8469                         if (ret) {
8470                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8471                                         __func__, clki->name,
8472                                         clki->max_freq, ret);
8473                                 goto out;
8474                         }
8475                         clki->curr_freq = clki->max_freq;
8476                 }
8477                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8478                                 clki->name, clk_get_rate(clki->clk));
8479         }
8480 out:
8481         return ret;
8482 }
8483
8484 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8485 {
8486         int err = 0;
8487
8488         if (!hba->vops)
8489                 goto out;
8490
8491         err = ufshcd_vops_init(hba);
8492         if (err)
8493                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8494                         __func__, ufshcd_get_var_name(hba), err);
8495 out:
8496         return err;
8497 }
8498
8499 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8500 {
8501         if (!hba->vops)
8502                 return;
8503
8504         ufshcd_vops_exit(hba);
8505 }
8506
8507 static int ufshcd_hba_init(struct ufs_hba *hba)
8508 {
8509         int err;
8510
8511         /*
8512          * Handle host controller power separately from the UFS device power
8513          * rails as it helps control the UFS host controller power
8514          * collapse independently, which is different from UFS device power
8515          * collapse. Also, enable the host controller power before we go
8516          * ahead with the rest of the initialization here.
8517          */
8518         err = ufshcd_init_hba_vreg(hba);
8519         if (err)
8520                 goto out;
8521
8522         err = ufshcd_setup_hba_vreg(hba, true);
8523         if (err)
8524                 goto out;
8525
8526         err = ufshcd_init_clocks(hba);
8527         if (err)
8528                 goto out_disable_hba_vreg;
8529
8530         err = ufshcd_setup_clocks(hba, true);
8531         if (err)
8532                 goto out_disable_hba_vreg;
8533
8534         err = ufshcd_init_vreg(hba);
8535         if (err)
8536                 goto out_disable_clks;
8537
8538         err = ufshcd_setup_vreg(hba, true);
8539         if (err)
8540                 goto out_disable_clks;
8541
8542         err = ufshcd_variant_hba_init(hba);
8543         if (err)
8544                 goto out_disable_vreg;
8545
8546         ufs_debugfs_hba_init(hba);
8547
8548         hba->is_powered = true;
8549         goto out;
8550
8551 out_disable_vreg:
8552         ufshcd_setup_vreg(hba, false);
8553 out_disable_clks:
8554         ufshcd_setup_clocks(hba, false);
8555 out_disable_hba_vreg:
8556         ufshcd_setup_hba_vreg(hba, false);
8557 out:
8558         return err;
8559 }
8560
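/**
 * ufshcd_hba_exit - power down everything brought up by ufshcd_hba_init()
 * @hba: per adapter instance
 */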
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
        if (hba->is_powered) {
                ufshcd_exit_clk_scaling(hba);
                ufshcd_exit_clk_gating(hba);
                if (hba->eh_wq)
                        destroy_workqueue(hba->eh_wq);
                ufs_debugfs_hba_exit(hba);
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
                ufshcd_setup_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
                ufs_put_device_desc(hba);
        }
}

/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *                           power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if the requested power mode is set successfully;
 * returns non-zero if setting the requested power mode failed.
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
                                     enum ufs_dev_pwr_mode pwr_mode)
{
        unsigned char cmd[6] = { START_STOP };
        struct scsi_sense_hdr sshdr;
        struct scsi_device *sdp;
        unsigned long flags;
        int ret, retries;

        spin_lock_irqsave(hba->host->host_lock, flags);
        sdp = hba->sdev_ufs_device;
        if (sdp) {
                ret = scsi_device_get(sdp);
                if (!ret && !scsi_device_online(sdp)) {
                        ret = -ENODEV;
                        scsi_device_put(sdp);
                }
        } else {
                ret = -ENODEV;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (ret)
                return ret;

        /*
         * If SCSI commands fail, the SCSI mid-layer schedules SCSI error
         * handling, which would wait for the host to be resumed. Since we
         * know we are functional while we are here, skip host resume in
         * error handling context.
         */
        hba->host->eh_noresume = 1;

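        /*
         * Per SBC, byte 4 of the START STOP UNIT CDB carries the POWER
         * CONDITION field in bits 7:4, which is what the shift below fills
         * in. For example (assuming the enum values from ufs.h, where
         * UFS_SLEEP_PWR_MODE is 2 and UFS_POWERDOWN_PWR_MODE is 3), Sleep
         * encodes as cmd[4] = 0x20 and PowerDown as cmd[4] = 0x30.
         */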
        cmd[4] = pwr_mode << 4;

        /*
         * The current function is generally called from the power management
         * callbacks, hence set the RQF_PM flag so that it doesn't resume the
         * already suspended children.
         */
        for (retries = 3; retries > 0; --retries) {
                ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
                                START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
                if (!scsi_status_is_check_condition(ret) ||
                                !scsi_sense_valid(&sshdr) ||
                                sshdr.sense_key != UNIT_ATTENTION)
                        break;
        }
        if (ret) {
                sdev_printk(KERN_WARNING, sdp,
                            "START_STOP failed for power mode: %d, result %x\n",
                            pwr_mode, ret);
                if (ret > 0 && scsi_sense_valid(&sshdr))
                        scsi_print_sense_hdr(sdp, NULL, &sshdr);
        }

        if (!ret)
                hba->curr_dev_pwr_mode = pwr_mode;

        scsi_device_put(sdp);
        hba->host->eh_noresume = 0;
        return ret;
}

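/**
 * ufshcd_link_state_transition - move the UniPro link to the requested state
 * @hba: per adapter instance
 * @req_link_state: requested link state (active, Hibern8 or off)
 * @check_for_bkops: if set, refuse to turn the link off while auto-bkops is
 *	enabled, since powering the link down would also power the device down
 *
 * Returns 0 on success, non-zero value on failure.
 */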
static int ufshcd_link_state_transition(struct ufs_hba *hba,
                                        enum uic_link_state req_link_state,
                                        int check_for_bkops)
{
        int ret = 0;

        if (req_link_state == hba->uic_link_state)
                return 0;

        if (req_link_state == UIC_LINK_HIBERN8_STATE) {
                ret = ufshcd_uic_hibern8_enter(hba);
                if (!ret) {
                        ufshcd_set_link_hibern8(hba);
                } else {
                        dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
                                        __func__, ret);
                        goto out;
                }
        }
        /*
         * If autobkops is enabled, the link can't be turned off because
         * turning off the link would also turn off the device, except in the
         * case of DeepSleep where the device is expected to remain powered.
         */
        else if ((req_link_state == UIC_LINK_OFF_STATE) &&
                 (!check_for_bkops || !hba->auto_bkops_enabled)) {
                /*
                 * Make sure that the link is in low power mode; we currently
                 * do this by putting the link in Hibern8. Another way to put
                 * the link in low power mode is to send a DME end point reset
                 * to the device and then send the DME reset command to the
                 * local UniPro, but putting the link in Hibern8 is much
                 * faster.
                 *
                 * Note also that putting the link in Hibern8 is a requirement
                 * for entering DeepSleep.
                 */
                ret = ufshcd_uic_hibern8_enter(hba);
                if (ret) {
                        dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
                                        __func__, ret);
                        goto out;
                }
                /*
                 * Change controller state to "reset state", which
                 * should also put the link in off/reset state.
                 */
                ufshcd_hba_stop(hba);
                /*
                 * TODO: Check if we need any delay to make sure that
                 * controller is reset
                 */
                ufshcd_set_link_off(hba);
        }

out:
        return ret;
}

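/**
 * ufshcd_vreg_set_lpm - put the UFS device power rails in low power mode
 * @hba: per adapter instance
 */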
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
        bool vcc_off = false;

        /*
         * It seems some UFS devices may keep drawing more than sleep current
         * (at least for 500us) from the UFS rails (especially from the VCCQ
         * rail). To avoid this situation, add a 2ms delay before putting
         * these UFS rails in LPM mode.
         */
        if (!ufshcd_is_link_active(hba) &&
            hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
                usleep_range(2000, 2100);

        /*
         * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
         * save some power.
         *
         * If the UFS device and link are in OFF state, all power supplies
         * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is
         * not required. If the UFS link is inactive (Hibern8 or OFF state)
         * and the device is in sleep state, put the VCCQ & VCCQ2 rails in
         * LPM mode.
         *
         * Ignore the error returned by ufshcd_toggle_vreg() as the device is
         * anyway in a low power state, which would save some power.
         *
         * If Write Booster is enabled and the device needs to flush the WB
         * buffer OR if bkops status is urgent for WB, keep VCC on.
         */
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
            !hba->dev_info.is_lu_power_on_wp) {
                ufshcd_setup_vreg(hba, false);
                vcc_off = true;
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
                ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
                vcc_off = true;
                if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
                        ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
                        ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
                }
        }

        /*
         * Some UFS devices require a delay after the VCC power rail is
         * turned off.
         */
        if (vcc_off && hba->vreg_info.vcc &&
                hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
                usleep_range(5000, 5100);
}

#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
        int ret = 0;

        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
            !hba->dev_info.is_lu_power_on_wp) {
                ret = ufshcd_setup_vreg(hba, true);
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
                if (!ufshcd_is_link_active(hba)) {
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
                        if (ret)
                                goto vcc_disable;
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
                        if (ret)
                                goto vccq_lpm;
                }
                ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
        }
        goto out;

vccq_lpm:
        ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
        ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
        return ret;
}
#endif /* CONFIG_PM */

static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
        if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
                ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
        if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
                ufshcd_setup_hba_vreg(hba, true);
}

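/**
 * __ufshcd_wl_suspend - suspend the UFS device W-LU
 * @hba: per adapter instance
 * @pm_op: runtime PM, system PM or shutdown
 *
 * Derives the target device power mode and link state from the rpm_lvl or
 * spm_lvl PM level (for shutdown: PowerDown mode with the link off), then
 * transitions the device and the link into those states, honouring bkops
 * and WriteBooster flush requirements for runtime PM.
 */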
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int ret = 0;
        int check_for_bkops;
        enum ufs_pm_level pm_lvl;
        enum ufs_dev_pwr_mode req_dev_pwr_mode;
        enum uic_link_state req_link_state;

        hba->pm_op_in_progress = true;
        if (pm_op != UFS_SHUTDOWN_PM) {
                pm_lvl = pm_op == UFS_RUNTIME_PM ?
                         hba->rpm_lvl : hba->spm_lvl;
                req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
                req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
        } else {
                req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
                req_link_state = UIC_LINK_OFF_STATE;
        }

        ufshpb_suspend(hba);

        /*
         * If we can't transition into any of the low power modes,
         * just gate the clocks.
         */
        ufshcd_hold(hba, false);
        hba->clk_gating.is_suspended = true;

        if (ufshcd_is_clkscaling_supported(hba))
                ufshcd_clk_scaling_suspend(hba, true);

        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
                goto vops_suspend;
        }

        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
            (req_link_state == hba->uic_link_state))
                goto enable_scaling;

        /* UFS device & link must be active before we enter this function */
        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
                ret = -EINVAL;
                goto enable_scaling;
        }

        if (pm_op == UFS_RUNTIME_PM) {
                if (ufshcd_can_autobkops_during_suspend(hba)) {
                        /*
                         * The device is idle with no requests in the queue;
                         * allow background operations if bkops status shows
                         * that performance might be impacted.
                         */
                        ret = ufshcd_urgent_bkops(hba);
                        if (ret)
                                goto enable_scaling;
                } else {
                        /* make sure that auto bkops is disabled */
                        ufshcd_disable_auto_bkops(hba);
                }
                /*
                 * If the device needs to do BKOPs or a WB buffer flush during
                 * Hibern8, keep the device power mode as "active power mode"
                 * and keep the VCC supply on.
                 */
                hba->dev_info.b_rpm_dev_flush_capable =
                        hba->auto_bkops_enabled ||
                        (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
                        ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
                        ufshcd_is_auto_hibern8_enabled(hba))) &&
                        ufshcd_wb_need_flush(hba));
        }

        flush_work(&hba->eeh_work);

        ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
        if (ret)
                goto enable_scaling;

        if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
                if (pm_op != UFS_RUNTIME_PM)
                        /* ensure that bkops is disabled */
                        ufshcd_disable_auto_bkops(hba);

                if (!hba->dev_info.b_rpm_dev_flush_capable) {
                        ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
                        if (ret)
                                goto enable_scaling;
                }
        }

        /*
         * In the case of DeepSleep, the device is expected to remain powered
         * with the link off, so do not check for bkops.
         */
        check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
        ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
        if (ret)
                goto set_dev_active;

vops_suspend:
        /*
         * Call the vendor specific suspend callback. As these callbacks may
         * access vendor specific host controller register space, call them
         * while the host clocks are still ON.
         */
        ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
        if (ret)
                goto set_link_active;
        goto out;

set_link_active:
        /*
         * A device hardware reset is required to exit DeepSleep. Also, for
         * DeepSleep, the link is off, so host reset and restore will be done
         * further below.
         */
        if (ufshcd_is_ufs_dev_deepsleep(hba)) {
                ufshcd_device_reset(hba);
                WARN_ON(!ufshcd_is_link_off(hba));
        }
        if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
                ufshcd_set_link_active(hba);
        else if (ufshcd_is_link_off(hba))
                ufshcd_host_reset_and_restore(hba);
set_dev_active:
        /* Can also get here needing to exit DeepSleep */
        if (ufshcd_is_ufs_dev_deepsleep(hba)) {
                ufshcd_device_reset(hba);
                ufshcd_host_reset_and_restore(hba);
        }
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
enable_scaling:
        if (ufshcd_is_clkscaling_supported(hba))
                ufshcd_clk_scaling_suspend(hba, false);

        hba->dev_info.b_rpm_dev_flush_capable = false;
out:
        if (hba->dev_info.b_rpm_dev_flush_capable) {
                schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
                        msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
        }

        if (ret) {
                ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
                hba->clk_gating.is_suspended = false;
                ufshcd_release(hba);
                ufshpb_resume(hba);
        }
        hba->pm_op_in_progress = false;
        return ret;
}

#ifdef CONFIG_PM
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int ret;
        enum uic_link_state old_link_state = hba->uic_link_state;

        hba->pm_op_in_progress = true;

        /*
         * Call the vendor specific resume callback. As these callbacks may
         * access vendor specific host controller register space, call them
         * when the host clocks are ON.
         */
        ret = ufshcd_vops_resume(hba, pm_op);
        if (ret)
                goto out;

        /* For DeepSleep, the only supported option is to have the link off */
        WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));

        if (ufshcd_is_link_hibern8(hba)) {
                ret = ufshcd_uic_hibern8_exit(hba);
                if (!ret) {
                        ufshcd_set_link_active(hba);
                } else {
                        dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                        __func__, ret);
                        goto vendor_suspend;
                }
        } else if (ufshcd_is_link_off(hba)) {
                /*
                 * A full initialization of the host and the device is
                 * required since the link was put to off during suspend.
                 * Note, in the case of DeepSleep, the device will exit
                 * DeepSleep due to device reset.
                 */
                ret = ufshcd_reset_and_restore(hba);
                /*
                 * ufshcd_reset_and_restore() should have already
                 * set the link state as active
                 */
                if (ret || !ufshcd_is_link_active(hba))
                        goto vendor_suspend;
        }

        if (!ufshcd_is_ufs_dev_active(hba)) {
                ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
                if (ret)
                        goto set_old_link_state;
        }

        if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
                ufshcd_enable_auto_bkops(hba);
        else
                /*
                 * If BKOPs are urgently needed at this moment, keep
                 * auto-bkops enabled; otherwise disable it.
                 */
                ufshcd_urgent_bkops(hba);

        if (hba->ee_usr_mask)
                ufshcd_write_ee_control(hba);

        if (ufshcd_is_clkscaling_supported(hba))
                ufshcd_clk_scaling_suspend(hba, false);

        if (hba->dev_info.b_rpm_dev_flush_capable) {
                hba->dev_info.b_rpm_dev_flush_capable = false;
                cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
        }

        /* Enable Auto-Hibernate if configured */
        ufshcd_auto_hibern8_enable(hba);

        ufshpb_resume(hba);
        goto out;

set_old_link_state:
        ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
        ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
        ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
        if (ret)
                ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
        hba->clk_gating.is_suspended = false;
        ufshcd_release(hba);
        hba->pm_op_in_progress = false;
        return ret;
}

static int ufshcd_wl_runtime_suspend(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba;
        int ret;
        ktime_t start = ktime_get();

        hba = shost_priv(sdev->host);

        ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
        if (ret)
                dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

        trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);

        return ret;
}

static int ufshcd_wl_runtime_resume(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba;
        int ret = 0;
        ktime_t start = ktime_get();

        hba = shost_priv(sdev->host);

        ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
        if (ret)
                dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);

        trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);

        return ret;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba;
        int ret = 0;
        ktime_t start = ktime_get();

        hba = shost_priv(sdev->host);
        down(&hba->host_sem);

        if (pm_runtime_suspended(dev))
                goto out;

        ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
        if (ret) {
                dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
                up(&hba->host_sem);
        }

out:
        if (!ret)
                hba->is_sys_suspended = true;
        trace_ufshcd_wl_suspend(dev_name(dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);

        return ret;
}

static int ufshcd_wl_resume(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba;
        int ret = 0;
        ktime_t start = ktime_get();

        hba = shost_priv(sdev->host);

        if (pm_runtime_suspended(dev))
                goto out;

        ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
        if (ret)
                dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
        trace_ufshcd_wl_resume(dev_name(dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        if (!ret)
                hba->is_sys_suspended = false;
        up(&hba->host_sem);
        return ret;
}
#endif

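/**
 * ufshcd_wl_shutdown - shutdown callback for the UFS device W-LU
 * @dev: the W-LU device
 *
 * Brings the device fully up, quiesces all SCSI devices on the host and then
 * suspends with UFS_SHUTDOWN_PM (device PowerDown, link off).
 */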
static void ufshcd_wl_shutdown(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba;

        hba = shost_priv(sdev->host);

        down(&hba->host_sem);
        hba->shutting_down = true;
        up(&hba->host_sem);

        /* Turn on everything while shutting down */
        ufshcd_rpm_get_sync(hba);
        scsi_device_quiesce(sdev);
        shost_for_each_device(sdev, hba->host) {
                if (sdev == hba->sdev_ufs_device)
                        continue;
                scsi_device_quiesce(sdev);
        }
        __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
}

/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function disables irqs, turns off clocks and puts the vreg and
 * hba-vreg supplies in LPM mode.
 *
 * Returns 0 for success and non-zero for failure.
 */
static int ufshcd_suspend(struct ufs_hba *hba)
{
        int ret;

        if (!hba->is_powered)
                return 0;
        /*
         * Disable the host irq, as there won't be any host controller
         * transactions expected till resume.
         */
        ufshcd_disable_irq(hba);
        ret = ufshcd_setup_clocks(hba, false);
        if (ret) {
                ufshcd_enable_irq(hba);
                return ret;
        }
        if (ufshcd_is_clkgating_allowed(hba)) {
                hba->clk_gating.state = CLKS_OFF;
                trace_ufshcd_clk_gating(dev_name(hba->dev),
                                        hba->clk_gating.state);
        }

        ufshcd_vreg_set_lpm(hba);
        /* Put the host controller in low power mode if possible */
        ufshcd_hba_vreg_set_lpm(hba);
        return ret;
}

#ifdef CONFIG_PM
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and
 * irqs of the hba.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba)
{
        int ret;

        if (!hba->is_powered)
                return 0;

        ufshcd_hba_vreg_set_hpm(hba);
        ret = ufshcd_vreg_set_hpm(hba);
        if (ret)
                goto out;

        /* Make sure clocks are enabled before accessing controller */
        ret = ufshcd_setup_clocks(hba, true);
        if (ret)
                goto disable_vreg;

        /* enable the host irq as host controller would be active soon */
        ufshcd_enable_irq(hba);
        goto out;

disable_vreg:
        ufshcd_vreg_set_lpm(hba);
out:
        if (ret)
                ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
        return ret;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret = 0;
        ktime_t start = ktime_get();

        if (pm_runtime_suspended(hba->dev))
                goto out;

        ret = ufshcd_suspend(hba);
out:
        trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start = ktime_get();
        int ret = 0;

        if (pm_runtime_suspended(hba->dev))
                goto out;

        ret = ufshcd_resume(hba);

out:
        trace_ufshcd_system_resume(dev_name(hba->dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);

        return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;
        ktime_t start = ktime_get();

        ret = ufshcd_suspend(hba);

        trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @dev: Device associated with the UFS controller.
 *
 * This function basically brings the controller to the active state.
 * The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Turn ON the VCC rail
 */
int ufshcd_runtime_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;
        ktime_t start = ktime_get();

        ret = ufshcd_resume(hba);

        trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)),
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */

/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function turns off both the UFS device and UFS hba
 * regulators. It also disables the clocks.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;

        pm_runtime_get_sync(hba->dev);

        ufshcd_suspend(hba);
out:
        hba->is_powered = false;
        /* allow force shutdown even in case of errors */
        return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);

/**
 * ufshcd_remove - de-allocate the SCSI host and host memory space
 *              data structures
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
        if (hba->sdev_ufs_device)
                ufshcd_rpm_get_sync(hba);
        ufs_hwmon_remove(hba);
        ufs_bsg_remove(hba);
        ufshpb_remove(hba);
        ufs_sysfs_remove_nodes(hba->dev);
        blk_cleanup_queue(hba->tmf_queue);
        blk_mq_free_tag_set(&hba->tmf_tag_set);
        blk_cleanup_queue(hba->cmd_queue);
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba);
        ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
        scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *                       addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
        if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
                if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
                        return 0;
        }
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
        struct Scsi_Host *host;
        struct ufs_hba *hba;
        int err = 0;

        if (!dev) {
                dev_err(dev,
                "Invalid memory reference: dev is NULL\n");
                err = -ENODEV;
                goto out_error;
        }

        host = scsi_host_alloc(&ufshcd_driver_template,
                                sizeof(struct ufs_hba));
        if (!host) {
                dev_err(dev, "scsi_host_alloc failed\n");
                err = -ENOMEM;
                goto out_error;
        }
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
        hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
        hba->nop_out_timeout = NOP_OUT_TIMEOUT;
        INIT_LIST_HEAD(&hba->clk_list_head);
        spin_lock_init(&hba->outstanding_lock);

        *hba_handle = hba;

out_error:
        return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

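/*
 * Illustrative call sequence from a bus glue driver's probe (a minimal
 * sketch; the platform-device plumbing below is hypothetical and error
 * handling is elided):
 *
 *	struct ufs_hba *hba;
 *	void __iomem *mmio = devm_platform_ioremap_resource(pdev, 0);
 *	int irq = platform_get_irq(pdev, 0);
 *	int err = ufshcd_alloc_host(&pdev->dev, &hba);
 *
 *	if (!err)
 *		err = ufshcd_init(hba, mmio, irq);
 */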
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *qd)
{
        WARN_ON_ONCE(true);
        return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
        .queue_rq = ufshcd_queue_tmf,
};

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
        char eh_wq_name[sizeof("ufs_eh_wq_00")];

        if (!mmio_base) {
                dev_err(hba->dev,
                "Invalid memory reference: mmio_base is NULL\n");
                err = -ENODEV;
                goto out_error;
        }

        hba->mmio_base = mmio_base;
        hba->irq = irq;
        hba->vps = &ufs_hba_vps;

        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;

        /* Read capabilities registers */
        err = ufshcd_hba_capabilities(hba);
        if (err)
                goto out_disable;

        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);

        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);

        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
                goto out_disable;
        }

        /* Allocate memory for host memory space */
        err = ufshcd_memory_alloc(hba);
        if (err) {
                dev_err(hba->dev, "Memory allocation failed\n");
                goto out_disable;
        }

        /* Configure LRB */
        ufshcd_host_memory_configure(hba);

        host->can_queue = hba->nutrs;
        host->cmd_per_lun = hba->nutrs;
        host->max_id = UFSHCD_MAX_ID;
        host->max_lun = UFS_MAX_LUNS;
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = UFS_CDB_SIZE;

        hba->max_pwr_info.is_valid = false;

        /* Initialize work queues */
        snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
                 hba->host->host_no);
        hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
        if (!hba->eh_wq) {
                dev_err(hba->dev, "%s: failed to create eh workqueue\n",
                        __func__);
                err = -ENOMEM;
                goto out_disable;
        }
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

        sema_init(&hba->host_sem, 1);

        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);

        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);

        /* Initialize mutex for exception event control */
        mutex_init(&hba->ee_ctrl_mutex);

        init_rwsem(&hba->clk_scaling_lock);

        ufshcd_init_clk_gating(hba);

        ufshcd_init_clk_scaling(hba);

        /*
         * In order to avoid any spurious interrupt immediately after
         * registering the UFS controller interrupt handler, clear any pending
         * UFS interrupt status and disable all the UFS interrupts.
         */
        ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
                      REG_INTERRUPT_STATUS);
        ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
        /*
         * Make sure that UFS interrupts are disabled and any pending interrupt
         * status is cleared before registering the UFS interrupt handler.
         */
        mb();

        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                dev_err(hba->dev, "request irq failed\n");
                goto out_disable;
        } else {
                hba->is_irq_enabled = true;
        }

        err = scsi_add_host(host, hba->dev);
        if (err) {
                dev_err(hba->dev, "scsi_add_host failed\n");
                goto out_disable;
        }

        hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
        if (IS_ERR(hba->cmd_queue)) {
                err = PTR_ERR(hba->cmd_queue);
                goto out_remove_scsi_host;
        }

        hba->tmf_tag_set = (struct blk_mq_tag_set) {
                .nr_hw_queues   = 1,
                .queue_depth    = hba->nutmrs,
                .ops            = &ufshcd_tmf_ops,
                .flags          = BLK_MQ_F_NO_SCHED,
        };
        err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
        if (err < 0)
                goto free_cmd_queue;
        hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
        if (IS_ERR(hba->tmf_queue)) {
                err = PTR_ERR(hba->tmf_queue);
                goto free_tmf_tag_set;
        }
        hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
                                    sizeof(*hba->tmf_rqs), GFP_KERNEL);
        if (!hba->tmf_rqs) {
                err = -ENOMEM;
                goto free_tmf_queue;
        }

        /* Reset the attached device */
        ufshcd_device_reset(hba);

        ufshcd_init_crypto(hba);

        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
                ufshcd_print_evt_hist(hba);
                ufshcd_print_host_state(hba);
                goto free_tmf_queue;
        }

        /*
         * Set the default power management level for runtime and system PM.
         * The default power saving mode is to keep the UFS link in Hibern8
         * state and the UFS device in sleep state.
         */
        hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);
        hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);

        INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
                          ufshcd_rpm_dev_flush_recheck_work);

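        /*
         * On the AHIT encoding (a hedged note, assuming the usual UFSHCI
         * scale table where scale code 3 selects 1 ms units): the
         * FIELD_PREP() pair below sets timer = 150 and scale = 3, i.e. a
         * 150 ms idle period.
         */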
        /* Set the default auto-hibernate idle timer value to 150 ms */
        if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
                            FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
        }

        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
        atomic_set(&hba->scsi_block_reqs_cnt, 0);
        /*
         * We assume the device wasn't put into a sleep/power-down state by
         * the boot stage before the kernel started. This assumption helps
         * avoid doing link startup twice during ufshcd_probe_hba().
         */
        ufshcd_set_ufs_dev_active(hba);

        async_schedule(ufshcd_async_scan, hba);
        ufs_sysfs_add_nodes(hba->dev);

        device_enable_async_suspend(dev);
        return 0;

free_tmf_queue:
        blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
        blk_mq_free_tag_set(&hba->tmf_tag_set);
free_cmd_queue:
        blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
        scsi_remove_host(hba->host);
out_disable:
        hba->is_irq_enabled = false;
        ufshcd_hba_exit(hba);
out_error:
        return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

void ufshcd_resume_complete(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        if (hba->complete_put) {
                ufshcd_rpm_put(hba);
                hba->complete_put = false;
        }
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);

int ufshcd_suspend_prepare(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;

        /*
         * SCSI assumes that runtime-pm and system-pm for scsi drivers are
         * the same, and it doesn't wake up the device for system-suspend
         * if it's runtime suspended. But UFS doesn't follow that;
         * refer to ufshcd_resume_complete().
         */
        if (hba->sdev_ufs_device) {
                ret = ufshcd_rpm_get_sync(hba);
                if (ret < 0 && ret != -EACCES) {
                        ufshcd_rpm_put(hba);
                        return ret;
                }
                hba->complete_put = true;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);

#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba = shost_priv(sdev->host);

        __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
        return 0;
}
#endif

static int ufshcd_wl_probe(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        if (!is_device_wlun(sdev))
                return -ENODEV;

        blk_pm_runtime_init(sdev->request_queue, dev);
        pm_runtime_set_autosuspend_delay(dev, 0);
        pm_runtime_allow(dev);

        return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
        pm_runtime_forbid(dev);
        return 0;
}

static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        .suspend = ufshcd_wl_suspend,
        .resume = ufshcd_wl_resume,
        .freeze = ufshcd_wl_suspend,
        .thaw = ufshcd_wl_resume,
        .poweroff = ufshcd_wl_poweroff,
        .restore = ufshcd_wl_resume,
#endif
        SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};

/*
 * ufs_dev_wlun_template - describes the UFS device W-LUN
 *
 * The ufs-device wlun is used to send pm commands; all LUNs are consumers
 * of the ufs-device wlun.
 *
 * Currently, no sd driver is present for wluns, hence no specific pm
 * operations are performed. By UFS design, SSU should be sent to the
 * ufs-device wlun, hence a scsi driver is registered for ufs wluns only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
        .gendrv = {
                .name = "ufs_device_wlun",
                .owner = THIS_MODULE,
                .probe = ufshcd_wl_probe,
                .remove = ufshcd_wl_remove,
                .pm = &ufshcd_wl_pm_ops,
                .shutdown = ufshcd_wl_shutdown,
        },
};

static int __init ufshcd_core_init(void)
{
        int ret;

        /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
        static_assert(sizeof(struct utp_transfer_cmd_desc) ==
                      2 * ALIGNED_UPIU_SIZE +
                              SG_ALL * sizeof(struct ufshcd_sg_entry));

        ufs_debugfs_init();

        ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
        if (ret)
                ufs_debugfs_exit();
        return ret;
}

static void __exit ufshcd_core_exit(void)
{
        ufs_debugfs_exit();
        scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);