1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13
14 #include "ufshcd.h"
15 #include "ufshpb.h"
16 #include "../sd.h"
17
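/*
 * Compile-time defaults for the HPB tunables used below (hpb->params): the
 * read-count threshold for sub-region activation, the region read timeout
 * and its expiry count, the timeout polling interval, and the throttle on
 * in-flight map requests.
 */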
18 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
19 #define READ_TO_MS 1000
20 #define READ_TO_EXPIRIES 100
21 #define POLLING_INTERVAL_MS 200
22 #define THROTTLE_MAP_REQ_DEFAULT 1
23
24 /* memory management */
25 static struct kmem_cache *ufshpb_mctx_cache;
26 static mempool_t *ufshpb_mctx_pool;
27 static mempool_t *ufshpb_page_pool;
28 /* A 2MB cache can hold PPN entries covering a 1GB LBA range. */
29 static unsigned int ufshpb_host_map_kbytes = 2048;
30 static int tot_active_srgn_pages;
31
32 static struct workqueue_struct *ufshpb_wq;
33
34 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
35                                       int srgn_idx);
36
37 bool ufshpb_is_allowed(struct ufs_hba *hba)
38 {
39         return !(hba->ufshpb_dev.hpb_disabled);
40 }
41
42 /* HPB version 1.0 is called the legacy version. */
43 bool ufshpb_is_legacy(struct ufs_hba *hba)
44 {
45         return hba->ufshpb_dev.is_legacy;
46 }
47
48 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
49 {
50         return sdev->hostdata;
51 }
52
53 static int ufshpb_get_state(struct ufshpb_lu *hpb)
54 {
55         return atomic_read(&hpb->hpb_state);
56 }
57
58 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
59 {
60         atomic_set(&hpb->hpb_state, state);
61 }
62
63 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
64                                 struct ufshpb_subregion *srgn)
65 {
66         return rgn->rgn_state != HPB_RGN_INACTIVE &&
67                 srgn->srgn_state == HPB_SRGN_VALID;
68 }
69
70 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
71 {
72         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
73 }
74
75 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
76 {
77         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
78                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
79 }
80
81 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
82 {
83         return transfer_len <= hpb->pre_req_max_tr_len;
84 }
85
86 static bool ufshpb_is_general_lun(int lun)
87 {
88         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
89 }
90
91 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
92 {
93         if (hpb->lu_pinned_end != PINNED_NOT_SET &&
94             rgn_idx >= hpb->lu_pinned_start &&
95             rgn_idx <= hpb->lu_pinned_end)
96                 return true;
97
98         return false;
99 }
100
101 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
102 {
103         bool ret = false;
104         unsigned long flags;
105
106         if (ufshpb_get_state(hpb) != HPB_PRESENT)
107                 return;
108
109         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
110         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
111                 ret = true;
112         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
113
114         if (ret)
115                 queue_work(ufshpb_wq, &hpb->map_work);
116 }
117
118 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
119                                     struct ufshcd_lrb *lrbp,
120                                     struct utp_hpb_rsp *rsp_field)
121 {
122         /* Check HPB_UPDATE_ALERT */
123         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
124               UPIU_HEADER_DWORD(0, 2, 0, 0)))
125                 return false;
126
127         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
128             rsp_field->desc_type != DEV_DES_TYPE ||
129             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
130             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
131             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
132             rsp_field->hpb_op == HPB_RSP_NONE ||
133             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
134              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
135                 return false;
136
137         if (!ufshpb_is_general_lun(rsp_field->lun)) {
138                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
139                          lrbp->lun);
140                 return false;
141         }
142
143         return true;
144 }
145
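/*
 * Walk every sub-region touched by an I/O of @cnt L2P entries starting at
 * (@rgn_idx, @srgn_idx, @srgn_offset). For writes/discards (@set_dirty) the
 * covered entries are marked dirty; for reads in host control mode the read
 * counters are bumped, the region read timer is rewound, and the sub-region
 * is queued for activation once it reaches the activation threshold.
 */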
146 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
147                                int srgn_offset, int cnt, bool set_dirty)
148 {
149         struct ufshpb_region *rgn;
150         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
151         int set_bit_len;
152         int bitmap_len;
153         unsigned long flags;
154
155 next_srgn:
156         rgn = hpb->rgn_tbl + rgn_idx;
157         srgn = rgn->srgn_tbl + srgn_idx;
158
159         if (likely(!srgn->is_last))
160                 bitmap_len = hpb->entries_per_srgn;
161         else
162                 bitmap_len = hpb->last_srgn_entries;
163
164         if ((srgn_offset + cnt) > bitmap_len)
165                 set_bit_len = bitmap_len - srgn_offset;
166         else
167                 set_bit_len = cnt;
168
169         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
170         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
171                 if (set_dirty) {
172                         if (srgn->srgn_state == HPB_SRGN_VALID)
173                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
174                                            set_bit_len);
175                 } else if (hpb->is_hcm) {
176                          /* rewind the read timer for lru regions */
177                         rgn->read_timeout = ktime_add_ms(ktime_get(),
178                                         rgn->hpb->params.read_timeout_ms);
179                         rgn->read_timeout_expiries =
180                                 rgn->hpb->params.read_timeout_expiries;
181                 }
182         }
183         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
184
185         if (hpb->is_hcm && prev_srgn != srgn) {
186                 bool activate = false;
187
188                 spin_lock(&rgn->rgn_lock);
189                 if (set_dirty) {
190                         rgn->reads -= srgn->reads;
191                         srgn->reads = 0;
192                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
193                 } else {
194                         srgn->reads++;
195                         rgn->reads++;
196                         if (srgn->reads == hpb->params.activation_thld)
197                                 activate = true;
198                 }
199                 spin_unlock(&rgn->rgn_lock);
200
201                 if (activate ||
202                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
203                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
204                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
205                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
206                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
207                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
208                 }
209
210                 prev_srgn = srgn;
211         }
212
213         srgn_offset = 0;
214         if (++srgn_idx == hpb->srgns_per_rgn) {
215                 srgn_idx = 0;
216                 rgn_idx++;
217         }
218
219         cnt -= set_bit_len;
220         if (cnt > 0)
221                 goto next_srgn;
222 }
223
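/*
 * Return true if HPB_READ cannot be used for this range, i.e. if any
 * sub-region covering the @cnt entries is invalid or inactive, has no mctx,
 * or contains at least one dirty L2P entry.
 */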
224 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
225                                   int srgn_idx, int srgn_offset, int cnt)
226 {
227         struct ufshpb_region *rgn;
228         struct ufshpb_subregion *srgn;
229         int bitmap_len;
230         int bit_len;
231
232 next_srgn:
233         rgn = hpb->rgn_tbl + rgn_idx;
234         srgn = rgn->srgn_tbl + srgn_idx;
235
236         if (likely(!srgn->is_last))
237                 bitmap_len = hpb->entries_per_srgn;
238         else
239                 bitmap_len = hpb->last_srgn_entries;
240
241         if (!ufshpb_is_valid_srgn(rgn, srgn))
242                 return true;
243
244         /*
245          * If the region state is active, mctx must be allocated.
246          * In this case, check whether the region was evicted or
247          * the mctx allocation failed.
248          */
249         if (unlikely(!srgn->mctx)) {
250                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
251                         "no mctx in region %d subregion %d.\n",
252                         srgn->rgn_idx, srgn->srgn_idx);
253                 return true;
254         }
255
256         if ((srgn_offset + cnt) > bitmap_len)
257                 bit_len = bitmap_len - srgn_offset;
258         else
259                 bit_len = cnt;
260
261         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
262                           srgn_offset) < bit_len + srgn_offset)
263                 return true;
264
265         srgn_offset = 0;
266         if (++srgn_idx == hpb->srgns_per_rgn) {
267                 srgn_idx = 0;
268                 rgn_idx++;
269         }
270
271         cnt -= bit_len;
272         if (cnt > 0)
273                 goto next_srgn;
274
275         return false;
276 }
277
278 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
279 {
280         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
281 }
282
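/*
 * Copy up to @len L2P entries (HPB_ENTRY_SIZE bytes each), starting at entry
 * @pos, from the mctx page array into @ppn_buf. Returns the number of entries
 * copied, which may be less than @len when the range crosses a page boundary.
 */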
283 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
284                                      struct ufshpb_map_ctx *mctx, int pos,
285                                      int len, __be64 *ppn_buf)
286 {
287         struct page *page;
288         int index, offset;
289         int copied;
290
291         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
292         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
293
294         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
295                 copied = len;
296         else
297                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
298
299         page = mctx->m_page[index];
300         if (unlikely(!page)) {
301                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
302                         "error. cannot find page in mctx\n");
303                 return -ENOMEM;
304         }
305
306         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
307                copied * HPB_ENTRY_SIZE);
308
309         return copied;
310 }
311
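/*
 * Translate an LPN into (region index, sub-region index, offset within the
 * sub-region) using the precomputed shift/mask values. For example, assuming
 * 4096 entries per region and 1024 entries per sub-region, LPN 5000 maps to
 * region 1, sub-region 0, offset 904.
 */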
312 static void
313 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
314                         int *srgn_idx, int *offset)
315 {
316         int rgn_offset;
317
318         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
319         rgn_offset = lpn & hpb->entries_per_rgn_mask;
320         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
321         *offset = rgn_offset & hpb->entries_per_srgn_mask;
322 }
323
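/*
 * Rewrite the CDB as an HPB READ command: the opcode in byte 0, the cached
 * PPN (big-endian, byte-swapped for devices with the swap quirk) in bytes
 * 6-13, the transfer length in byte 14 and a zero control byte in byte 15.
 */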
324 static void
325 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
326                             __be64 ppn, u8 transfer_len)
327 {
328         unsigned char *cdb = lrbp->cmd->cmnd;
329         __be64 ppn_tmp = ppn;
330         cdb[0] = UFSHPB_READ;
331
332         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
333                 ppn_tmp = (__force __be64)swab64((__force u64)ppn);
334
335         /* ppn value is stored as big-endian in the host memory */
336         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
337         cdb[14] = transfer_len;
338         cdb[15] = 0;
339
340         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
341 }
342
343 /*
344  * This function sets up an HPB READ command using the host-side L2P map data.
345  */
346 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
347 {
348         struct ufshpb_lu *hpb;
349         struct ufshpb_region *rgn;
350         struct ufshpb_subregion *srgn;
351         struct scsi_cmnd *cmd = lrbp->cmd;
352         u32 lpn;
353         __be64 ppn;
354         unsigned long flags;
355         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
356         int err = 0;
357
358         hpb = ufshpb_get_hpb_data(cmd->device);
359         if (!hpb)
360                 return -ENODEV;
361
362         if (ufshpb_get_state(hpb) == HPB_INIT)
363                 return -ENODEV;
364
365         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
366                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
367                            "%s: ufshpb state is not PRESENT", __func__);
368                 return -ENODEV;
369         }
370
371         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
372             (!ufshpb_is_write_or_discard(cmd) &&
373              !ufshpb_is_read_cmd(cmd)))
374                 return 0;
375
376         transfer_len = sectors_to_logical(cmd->device,
377                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
378         if (unlikely(!transfer_len))
379                 return 0;
380
381         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
382         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
383         rgn = hpb->rgn_tbl + rgn_idx;
384         srgn = rgn->srgn_tbl + srgn_idx;
385
386         /* If the command is a WRITE or DISCARD, mark the bitmap as dirty */
387         if (ufshpb_is_write_or_discard(cmd)) {
388                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
389                                    transfer_len, true);
390                 return 0;
391         }
392
393         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
394                 return 0;
395
396         if (hpb->is_hcm) {
397                 /*
398                  * in host control mode, reads are the main source for
399                  * activation trials.
400                  */
401                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
402                                    transfer_len, false);
403
404                 /* keep those counters normalized */
405                 if (rgn->reads > hpb->entries_per_srgn)
406                         schedule_work(&hpb->ufshpb_normalization_work);
407         }
408
409         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
410         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
411                                    transfer_len)) {
412                 hpb->stats.miss_cnt++;
413                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
414                 return 0;
415         }
416
417         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
418         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
419         if (unlikely(err < 0)) {
420                 /*
421                  * In this case, the region state is active,
422                  * but the ppn table is not allocated.
423                  * The ppn table must always be allocated while the
424                  * region is in the active state.
425                  */
426                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
427                 return err;
428         }
429
430         ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
431
432         hpb->stats.hit_cnt++;
433         return 0;
434 }
435
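/*
 * Allocate a ufshpb_req together with a passthrough block-layer request on
 * the LU's request queue. Non-atomic callers retry up to HPB_MAP_REQ_RETRIES
 * times, sleeping roughly 3ms between attempts, when no tag is available.
 */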
436 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
437                                          int rgn_idx, enum req_opf dir,
438                                          bool atomic)
439 {
440         struct ufshpb_req *rq;
441         struct request *req;
442         int retries = HPB_MAP_REQ_RETRIES;
443
444         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
445         if (!rq)
446                 return NULL;
447
448 retry:
449         req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
450                               BLK_MQ_REQ_NOWAIT);
451
452         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
453                 usleep_range(3000, 3100);
454                 goto retry;
455         }
456
457         if (IS_ERR(req))
458                 goto free_rq;
459
460         rq->hpb = hpb;
461         rq->req = req;
462         rq->rb.rgn_idx = rgn_idx;
463
464         return rq;
465
466 free_rq:
467         kmem_cache_free(hpb->map_req_cache, rq);
468         return NULL;
469 }
470
471 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
472 {
473         blk_mq_free_request(rq->req);
474         kmem_cache_free(hpb->map_req_cache, rq);
475 }
476
477 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
478                                              struct ufshpb_subregion *srgn)
479 {
480         struct ufshpb_req *map_req;
481         struct bio *bio;
482         unsigned long flags;
483
484         if (hpb->is_hcm &&
485             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
486                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
487                          "map_req throttle. inflight %d throttle %d",
488                          hpb->num_inflight_map_req,
489                          hpb->params.inflight_map_req);
490                 return NULL;
491         }
492
493         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
494         if (!map_req)
495                 return NULL;
496
497         bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
498         if (!bio) {
499                 ufshpb_put_req(hpb, map_req);
500                 return NULL;
501         }
502
503         map_req->bio = bio;
504
505         map_req->rb.srgn_idx = srgn->srgn_idx;
506         map_req->rb.mctx = srgn->mctx;
507
508         spin_lock_irqsave(&hpb->param_lock, flags);
509         hpb->num_inflight_map_req++;
510         spin_unlock_irqrestore(&hpb->param_lock, flags);
511
512         return map_req;
513 }
514
515 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
516                                struct ufshpb_req *map_req)
517 {
518         unsigned long flags;
519
520         bio_put(map_req->bio);
521         ufshpb_put_req(hpb, map_req);
522
523         spin_lock_irqsave(&hpb->param_lock, flags);
524         hpb->num_inflight_map_req--;
525         spin_unlock_irqrestore(&hpb->param_lock, flags);
526 }
527
528 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
529                                      struct ufshpb_subregion *srgn)
530 {
531         struct ufshpb_region *rgn;
532         u32 num_entries = hpb->entries_per_srgn;
533
534         if (!srgn->mctx) {
535                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
536                         "no mctx in region %d subregion %d.\n",
537                         srgn->rgn_idx, srgn->srgn_idx);
538                 return -1;
539         }
540
541         if (unlikely(srgn->is_last))
542                 num_entries = hpb->last_srgn_entries;
543
544         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
545
546         rgn = hpb->rgn_tbl + srgn->rgn_idx;
547         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
548
549         return 0;
550 }
551
552 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
553                                       int srgn_idx)
554 {
555         struct ufshpb_region *rgn;
556         struct ufshpb_subregion *srgn;
557
558         rgn = hpb->rgn_tbl + rgn_idx;
559         srgn = rgn->srgn_tbl + srgn_idx;
560
561         list_del_init(&rgn->list_inact_rgn);
562
563         if (list_empty(&srgn->list_act_srgn))
564                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
565
566         hpb->stats.rb_active_cnt++;
567 }
568
569 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
570 {
571         struct ufshpb_region *rgn;
572         struct ufshpb_subregion *srgn;
573         int srgn_idx;
574
575         rgn = hpb->rgn_tbl + rgn_idx;
576
577         for_each_sub_region(rgn, srgn_idx, srgn)
578                 list_del_init(&srgn->list_act_srgn);
579
580         if (list_empty(&rgn->list_inact_rgn))
581                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
582
583         hpb->stats.rb_inactive_cnt++;
584 }
585
586 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
587                                       struct ufshpb_subregion *srgn)
588 {
589         struct ufshpb_region *rgn;
590
591         /*
592          * If the subregion has no mctx after the I/O for HPB_READ_BUFFER
593          * has completed, the region to which the subregion belongs was
594          * evicted.
595          * A region must not be evicted while its I/O is in progress.
596          */
597         if (!srgn->mctx) {
598                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
599                         "no mctx in region %d subregion %d.\n",
600                         srgn->rgn_idx, srgn->srgn_idx);
601                 srgn->srgn_state = HPB_SRGN_INVALID;
602                 return;
603         }
604
605         rgn = hpb->rgn_tbl + srgn->rgn_idx;
606
607         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
608                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
609                         "region %d subregion %d evicted\n",
610                         srgn->rgn_idx, srgn->srgn_idx);
611                 srgn->srgn_state = HPB_SRGN_INVALID;
612                 return;
613         }
614         srgn->srgn_state = HPB_SRGN_VALID;
615 }
616
617 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
618 {
619         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
620
621         ufshpb_put_req(umap_req->hpb, umap_req);
622 }
623
624 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
625 {
626         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
627         struct ufshpb_lu *hpb = map_req->hpb;
628         struct ufshpb_subregion *srgn;
629         unsigned long flags;
630
631         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
632                 map_req->rb.srgn_idx;
633
634         ufshpb_clear_dirty_bitmap(hpb, srgn);
635         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
636         ufshpb_activate_subregion(hpb, srgn);
637         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
638
639         ufshpb_put_map_req(map_req->hpb, map_req);
640 }
641
642 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
643 {
644         cdb[0] = UFSHPB_WRITE_BUFFER;
645         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
646                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
647         if (rgn)
648                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
649         cdb[9] = 0x00;
650 }
651
652 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
653                                     int srgn_idx, int srgn_mem_size)
654 {
655         cdb[0] = UFSHPB_READ_BUFFER;
656         cdb[1] = UFSHPB_READ_BUFFER_ID;
657
658         put_unaligned_be16(rgn_idx, &cdb[2]);
659         put_unaligned_be16(srgn_idx, &cdb[4]);
660         put_unaligned_be24(srgn_mem_size, &cdb[6]);
661
662         cdb[9] = 0x00;
663 }
664
665 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
666                                    struct ufshpb_req *umap_req,
667                                    struct ufshpb_region *rgn)
668 {
669         struct request *req = umap_req->req;
670         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
671
672         req->timeout = 0;
673         req->end_io_data = umap_req;
674
675         ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
676         scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
677
678         blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
679
680         hpb->stats.umap_req_cnt++;
681 }
682
683 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
684                                   struct ufshpb_req *map_req, bool last)
685 {
686         struct request_queue *q;
687         struct request *req;
688         struct scsi_cmnd *scmd;
689         int mem_size = hpb->srgn_mem_size;
690         int ret = 0;
691         int i;
692
693         q = hpb->sdev_ufs_lu->request_queue;
694         for (i = 0; i < hpb->pages_per_srgn; i++) {
695                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
696                                       PAGE_SIZE, 0);
697                 if (ret != PAGE_SIZE) {
698                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
699                                    "bio_add_pc_page fail %d - %d\n",
700                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
701                         return ret;
702                 }
703         }
704
705         req = map_req->req;
706
707         blk_rq_append_bio(req, map_req->bio);
708
709         req->end_io_data = map_req;
710
711         if (unlikely(last))
712                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
713
714         scmd = blk_mq_rq_to_pdu(req);
715         ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
716                                 map_req->rb.srgn_idx, mem_size);
717         scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
718
719         blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
720
721         hpb->stats.map_req_cnt++;
722         return 0;
723 }
724
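/*
 * Allocate the map context for one sub-region: the array of pages that will
 * hold the L2P (PPN) entries plus a dirty bitmap with one bit per entry.
 * The pages are zeroed before use.
 */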
725 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
726                                                  bool last)
727 {
728         struct ufshpb_map_ctx *mctx;
729         u32 num_entries = hpb->entries_per_srgn;
730         int i, j;
731
732         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
733         if (!mctx)
734                 return NULL;
735
736         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
737         if (!mctx->m_page)
738                 goto release_mctx;
739
740         if (unlikely(last))
741                 num_entries = hpb->last_srgn_entries;
742
743         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
744         if (!mctx->ppn_dirty)
745                 goto release_m_page;
746
747         for (i = 0; i < hpb->pages_per_srgn; i++) {
748                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
749                 if (!mctx->m_page[i]) {
750                         for (j = 0; j < i; j++)
751                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
752                         goto release_ppn_dirty;
753                 }
754                 clear_page(page_address(mctx->m_page[i]));
755         }
756
757         return mctx;
758
759 release_ppn_dirty:
760         bitmap_free(mctx->ppn_dirty);
761 release_m_page:
762         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
763 release_mctx:
764         mempool_free(mctx, ufshpb_mctx_pool);
765         return NULL;
766 }
767
768 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
769                                struct ufshpb_map_ctx *mctx)
770 {
771         int i;
772
773         for (i = 0; i < hpb->pages_per_srgn; i++)
774                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
775
776         bitmap_free(mctx->ppn_dirty);
777         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
778         mempool_free(mctx, ufshpb_mctx_pool);
779 }
780
781 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
782                                           struct ufshpb_region *rgn)
783 {
784         struct ufshpb_subregion *srgn;
785         int srgn_idx;
786
787         for_each_sub_region(rgn, srgn_idx, srgn)
788                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
789                         return -EPERM;
790
791         return 0;
792 }
793
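/*
 * Read-timeout worker (host control mode): scan the LRU list and, for every
 * region whose read timer has expired, either rearm the timer or, if the
 * region is dirty or out of expiries, queue it for inactivation. The work
 * rearms itself every timeout_polling_interval_ms.
 */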
794 static void ufshpb_read_to_handler(struct work_struct *work)
795 {
796         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
797                                              ufshpb_read_to_work.work);
798         struct victim_select_info *lru_info = &hpb->lru_info;
799         struct ufshpb_region *rgn, *next_rgn;
800         unsigned long flags;
801         unsigned int poll;
802         LIST_HEAD(expired_list);
803
804         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
805                 return;
806
807         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
808
809         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
810                                  list_lru_rgn) {
811                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
812
813                 if (timedout) {
814                         rgn->read_timeout_expiries--;
815                         if (is_rgn_dirty(rgn) ||
816                             rgn->read_timeout_expiries == 0)
817                                 list_add(&rgn->list_expired_rgn, &expired_list);
818                         else
819                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
820                                                 hpb->params.read_timeout_ms);
821                 }
822         }
823
824         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
825
826         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
827                                  list_expired_rgn) {
828                 list_del_init(&rgn->list_expired_rgn);
829                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
830                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
831                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
832         }
833
834         ufshpb_kick_map_work(hpb);
835
836         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
837
838         poll = hpb->params.timeout_polling_interval_ms;
839         schedule_delayed_work(&hpb->ufshpb_read_to_work,
840                               msecs_to_jiffies(poll));
841 }
842
843 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
844                                 struct ufshpb_region *rgn)
845 {
846         rgn->rgn_state = HPB_RGN_ACTIVE;
847         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
848         atomic_inc(&lru_info->active_cnt);
849         if (rgn->hpb->is_hcm) {
850                 rgn->read_timeout =
851                         ktime_add_ms(ktime_get(),
852                                      rgn->hpb->params.read_timeout_ms);
853                 rgn->read_timeout_expiries =
854                         rgn->hpb->params.read_timeout_expiries;
855         }
856 }
857
858 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
859                                 struct ufshpb_region *rgn)
860 {
861         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
862 }
863
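/*
 * Pick an eviction victim: the least recently used region that has no
 * sub-region with an in-flight map request and, in host control mode, no
 * more reads than the eviction exit threshold.
 */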
864 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
865 {
866         struct victim_select_info *lru_info = &hpb->lru_info;
867         struct ufshpb_region *rgn, *victim_rgn = NULL;
868
869         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
870                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
871                         continue;
872
873                 /*
874                  * in host control mode, verify that the exiting region
875                  * has fewer reads than the eviction exit threshold
876                  */
877                 if (hpb->is_hcm &&
878                     rgn->reads > hpb->params.eviction_thld_exit)
879                         continue;
880
881                 victim_rgn = rgn;
882                 break;
883         }
884
885         if (!victim_rgn)
886                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
887                         "%s: no region allocated\n",
888                         __func__);
889
890         return victim_rgn;
891 }
892
893 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
894                                     struct ufshpb_region *rgn)
895 {
896         list_del_init(&rgn->list_lru_rgn);
897         rgn->rgn_state = HPB_RGN_INACTIVE;
898         atomic_dec(&lru_info->active_cnt);
899 }
900
901 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
902                                           struct ufshpb_subregion *srgn)
903 {
904         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
905                 ufshpb_put_map_ctx(hpb, srgn->mctx);
906                 srgn->srgn_state = HPB_SRGN_UNUSED;
907                 srgn->mctx = NULL;
908         }
909 }
910
911 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
912                                  struct ufshpb_region *rgn,
913                                  bool atomic)
914 {
915         struct ufshpb_req *umap_req;
916         int rgn_idx = rgn ? rgn->rgn_idx : 0;
917
918         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
919         if (!umap_req)
920                 return -ENOMEM;
921
922         ufshpb_execute_umap_req(hpb, umap_req, rgn);
923
924         return 0;
925 }
926
927 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
928                                         struct ufshpb_region *rgn)
929 {
930         return ufshpb_issue_umap_req(hpb, rgn, true);
931 }
932
933 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
934 {
935         return ufshpb_issue_umap_req(hpb, NULL, false);
936 }
937
938 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
939                                  struct ufshpb_region *rgn)
940 {
941         struct victim_select_info *lru_info;
942         struct ufshpb_subregion *srgn;
943         int srgn_idx;
944
945         lru_info = &hpb->lru_info;
946
947         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
948
949         ufshpb_cleanup_lru_info(lru_info, rgn);
950
951         for_each_sub_region(rgn, srgn_idx, srgn)
952                 ufshpb_purge_active_subregion(hpb, srgn);
953 }
954
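/*
 * Evict @rgn from the active LRU list. Pinned regions are never evicted and
 * regions with in-flight map requests are skipped with -EBUSY. In host
 * control mode an HPB WRITE BUFFER (inactivate single region) is sent to
 * the device before the region is dropped.
 */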
955 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
956 {
957         unsigned long flags;
958         int ret = 0;
959
960         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
961         if (rgn->rgn_state == HPB_RGN_PINNED) {
962                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
963                          "pinned region cannot drop-out. region %d\n",
964                          rgn->rgn_idx);
965                 goto out;
966         }
967
968         if (!list_empty(&rgn->list_lru_rgn)) {
969                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
970                         ret = -EBUSY;
971                         goto out;
972                 }
973
974                 if (hpb->is_hcm) {
975                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
976                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
977                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
978                         if (ret)
979                                 goto out;
980                 }
981
982                 __ufshpb_evict_region(hpb, rgn);
983         }
984 out:
985         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
986         return ret;
987 }
988
989 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
990                                 struct ufshpb_region *rgn,
991                                 struct ufshpb_subregion *srgn)
992 {
993         struct ufshpb_req *map_req;
994         unsigned long flags;
995         int ret;
996         int err = -EAGAIN;
997         bool alloc_required = false;
998         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
999
1000         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1001
1002         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1003                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1004                            "%s: ufshpb state is not PRESENT\n", __func__);
1005                 goto unlock_out;
1006         }
1007
1008         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1009             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1010                 err = 0;
1011                 goto unlock_out;
1012         }
1013
1014         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1015                 alloc_required = true;
1016
1017         /*
1018          * If the subregion is already in the ISSUED state, a specific
1019          * event (e.g. GC or wear-leveling) has occurred in the device and
1020          * an HPB response requesting a map reload has been received.
1021          * In this case, after the current HPB_READ_BUFFER finishes,
1022          * the next HPB_READ_BUFFER is issued again to obtain the latest
1023          * map data.
1024          */
1025         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1026                 goto unlock_out;
1027
1028         srgn->srgn_state = HPB_SRGN_ISSUED;
1029         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1030
1031         if (alloc_required) {
1032                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1033                 if (!srgn->mctx) {
1034                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1035                             "get map_ctx failed. region %d - %d\n",
1036                             rgn->rgn_idx, srgn->srgn_idx);
1037                         state = HPB_SRGN_UNUSED;
1038                         goto change_srgn_state;
1039                 }
1040         }
1041
1042         map_req = ufshpb_get_map_req(hpb, srgn);
1043         if (!map_req)
1044                 goto change_srgn_state;
1045
1046
1047         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1048         if (ret) {
1049                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1050                            "%s: issue map_req failed: %d, region %d - %d\n",
1051                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1052                 goto free_map_req;
1053         }
1054         return 0;
1055
1056 free_map_req:
1057         ufshpb_put_map_req(hpb, map_req);
1058 change_srgn_state:
1059         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1060         srgn->srgn_state = state;
1061 unlock_out:
1062         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1063         return err;
1064 }
1065
1066 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1067 {
1068         struct ufshpb_region *victim_rgn = NULL;
1069         struct victim_select_info *lru_info = &hpb->lru_info;
1070         unsigned long flags;
1071         int ret = 0;
1072
1073         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1074         /*
1075          * If the region is already on the LRU list, just move it to the
1076          * MRU end of the list, because the state of the region is
1077          * already active.
1078          */
1079         if (!list_empty(&rgn->list_lru_rgn)) {
1080                 ufshpb_hit_lru_info(lru_info, rgn);
1081                 goto out;
1082         }
1083
1084         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1085                 if (atomic_read(&lru_info->active_cnt) ==
1086                     lru_info->max_lru_active_cnt) {
1087                         /*
1088                          * If the maximum number of active regions
1089                          * is exceeded, evict the least recently used region.
1090                          * This case may occur when the device responds
1091                          * to the eviction information late.
1092                          * It is okay to evict the least recently used region,
1093                          * because the device can detect that the host no
1094                          * longer issues HPB_READ for it.
1095                          *
1096                          * in host control mode, also verify that the entering
1097                          * region has enough reads (eviction_thld_enter)
1098                          */
1099                         if (hpb->is_hcm &&
1100                             rgn->reads < hpb->params.eviction_thld_enter) {
1101                                 ret = -EACCES;
1102                                 goto out;
1103                         }
1104
1105                         victim_rgn = ufshpb_victim_lru_info(hpb);
1106                         if (!victim_rgn) {
1107                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1108                                     "cannot get victim region %s\n",
1109                                     hpb->is_hcm ? "" : "error");
1110                                 ret = -ENOMEM;
1111                                 goto out;
1112                         }
1113
1114                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1115                                 "LRU full (%d), choose victim %d\n",
1116                                 atomic_read(&lru_info->active_cnt),
1117                                 victim_rgn->rgn_idx);
1118
1119                         if (hpb->is_hcm) {
1120                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1121                                                        flags);
1122                                 ret = ufshpb_issue_umap_single_req(hpb,
1123                                                                 victim_rgn);
1124                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1125                                                   flags);
1126                                 if (ret)
1127                                         goto out;
1128                         }
1129
1130                         __ufshpb_evict_region(hpb, victim_rgn);
1131                 }
1132
1133                 /*
1134                  * When a region is added to the lru_info list_head,
1135                  * it is guaranteed that all of its subregions have been
1136                  * assigned an mctx. If that failed, the mctx is requested
1137                  * again without the region being added to the lru_info list_head.
1138                  */
1139                 ufshpb_add_lru_info(lru_info, rgn);
1140         }
1141 out:
1142         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1143         return ret;
1144 }
1145
1146 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1147                                          struct utp_hpb_rsp *rsp_field)
1148 {
1149         struct ufshpb_region *rgn;
1150         struct ufshpb_subregion *srgn;
1151         int i, rgn_i, srgn_i;
1152
1153         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1154         /*
1155          * If the active region and the inactive region are the same,
1156          * we will inactivate this region.
1157          * The device can detect this (the region being inactivated) and
1158          * will respond with the proper active region information.
1159          */
1160         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1161                 rgn_i =
1162                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1163                 srgn_i =
1164                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1165
1166                 rgn = hpb->rgn_tbl + rgn_i;
1167                 if (hpb->is_hcm &&
1168                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1169                         /*
1170                          * in host control mode, subregion activation
1171                          * recommendations are only honored for active regions.
1172                          * Also, ignore recommendations for dirty regions - the
1173                          * host will make decisions concerning those on its own.
1174                          */
1175                         continue;
1176                 }
1177
1178                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1179                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1180
1181                 spin_lock(&hpb->rsp_list_lock);
1182                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1183                 spin_unlock(&hpb->rsp_list_lock);
1184
1185                 srgn = rgn->srgn_tbl + srgn_i;
1186
1187                 /* blocking HPB_READ */
1188                 spin_lock(&hpb->rgn_state_lock);
1189                 if (srgn->srgn_state == HPB_SRGN_VALID)
1190                         srgn->srgn_state = HPB_SRGN_INVALID;
1191                 spin_unlock(&hpb->rgn_state_lock);
1192         }
1193
1194         if (hpb->is_hcm) {
1195                 /*
1196                  * in host control mode the device is not allowed to inactivate
1197                  * regions
1198                  */
1199                 goto out;
1200         }
1201
1202         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1203                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1204                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1205                         "inactivate(%d) region %d\n", i, rgn_i);
1206
1207                 spin_lock(&hpb->rsp_list_lock);
1208                 ufshpb_update_inactive_info(hpb, rgn_i);
1209                 spin_unlock(&hpb->rsp_list_lock);
1210
1211                 rgn = hpb->rgn_tbl + rgn_i;
1212
1213                 spin_lock(&hpb->rgn_state_lock);
1214                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1215                         for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1216                                 srgn = rgn->srgn_tbl + srgn_i;
1217                                 if (srgn->srgn_state == HPB_SRGN_VALID)
1218                                         srgn->srgn_state = HPB_SRGN_INVALID;
1219                         }
1220                 }
1221                 spin_unlock(&hpb->rgn_state_lock);
1222
1223         }
1224
1225 out:
1226         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1227                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1228
1229         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1230                 queue_work(ufshpb_wq, &hpb->map_work);
1231 }
1232
1233 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1234 {
1235         struct victim_select_info *lru_info = &hpb->lru_info;
1236         struct ufshpb_region *rgn;
1237         unsigned long flags;
1238
1239         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1240
1241         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1242                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1243
1244         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1245 }
1246
1247 /*
1248  * This function parses the recommended active subregion information from the
1249  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1250  */
1251 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1252 {
1253         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1254         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1255         int data_seg_len;
1256
1257         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1258                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1259
1260         /* If data segment length is zero, rsp_field is not valid */
1261         if (!data_seg_len)
1262                 return;
1263
1264         if (unlikely(lrbp->lun != rsp_field->lun)) {
1265                 struct scsi_device *sdev;
1266                 bool found = false;
1267
1268                 __shost_for_each_device(sdev, hba->host) {
1269                         hpb = ufshpb_get_hpb_data(sdev);
1270
1271                         if (!hpb)
1272                                 continue;
1273
1274                         if (rsp_field->lun == hpb->lun) {
1275                                 found = true;
1276                                 break;
1277                         }
1278                 }
1279
1280                 if (!found)
1281                         return;
1282         }
1283
1284         if (!hpb)
1285                 return;
1286
1287         if (ufshpb_get_state(hpb) == HPB_INIT)
1288                 return;
1289
1290         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1291             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1292                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1293                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1294                            __func__);
1295                 return;
1296         }
1297
1298         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1299
1300         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1301                 return;
1302
1303         hpb->stats.rb_noti_cnt++;
1304
1305         switch (rsp_field->hpb_op) {
1306         case HPB_RSP_REQ_REGION_UPDATE:
1307                 if (data_seg_len != DEV_DATA_SEG_LEN)
1308                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1309                                  "%s: data seg length is not same.\n",
1310                                  __func__);
1311                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1312                 break;
1313         case HPB_RSP_DEV_RESET:
1314                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1315                          "UFS device lost HPB information during PM.\n");
1316
1317                 if (hpb->is_hcm) {
1318                         struct scsi_device *sdev;
1319
1320                         __shost_for_each_device(sdev, hba->host) {
1321                                 struct ufshpb_lu *h = sdev->hostdata;
1322
1323                                 if (h)
1324                                         ufshpb_dev_reset_handler(h);
1325                         }
1326                 }
1327
1328                 break;
1329         default:
1330                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1331                            "hpb_op is not available: %d\n",
1332                            rsp_field->hpb_op);
1333                 break;
1334         }
1335 }
1336
1337 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1338                                    struct ufshpb_region *rgn,
1339                                    struct ufshpb_subregion *srgn)
1340 {
1341         if (!list_empty(&rgn->list_inact_rgn))
1342                 return;
1343
1344         if (!list_empty(&srgn->list_act_srgn)) {
1345                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1346                 return;
1347         }
1348
1349         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1350 }
1351
1352 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1353                                           struct ufshpb_region *rgn,
1354                                           struct list_head *pending_list)
1355 {
1356         struct ufshpb_subregion *srgn;
1357         int srgn_idx;
1358
1359         if (!list_empty(&rgn->list_inact_rgn))
1360                 return;
1361
1362         for_each_sub_region(rgn, srgn_idx, srgn)
1363                 if (!list_empty(&srgn->list_act_srgn))
1364                         return;
1365
1366         list_add_tail(&rgn->list_inact_rgn, pending_list);
1367 }
1368
1369 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1370 {
1371         struct ufshpb_region *rgn;
1372         struct ufshpb_subregion *srgn;
1373         unsigned long flags;
1374         int ret = 0;
1375
1376         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1377         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1378                                                 struct ufshpb_subregion,
1379                                                 list_act_srgn))) {
1380                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1381                         break;
1382
1383                 list_del_init(&srgn->list_act_srgn);
1384                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1385
1386                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1387                 ret = ufshpb_add_region(hpb, rgn);
1388                 if (ret)
1389                         goto active_failed;
1390
1391                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1392                 if (ret) {
1393                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1394                             "issue map_req failed. ret %d, region %d - %d\n",
1395                             ret, rgn->rgn_idx, srgn->srgn_idx);
1396                         goto active_failed;
1397                 }
1398                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1399         }
1400         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1401         return;
1402
1403 active_failed:
1404         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1405                    rgn->rgn_idx, srgn->srgn_idx);
1406         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1407         ufshpb_add_active_list(hpb, rgn, srgn);
1408         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1409 }
1410
1411 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1412 {
1413         struct ufshpb_region *rgn;
1414         unsigned long flags;
1415         int ret;
1416         LIST_HEAD(pending_list);
1417
1418         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1419         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1420                                                struct ufshpb_region,
1421                                                list_inact_rgn))) {
1422                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1423                         break;
1424
1425                 list_del_init(&rgn->list_inact_rgn);
1426                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1427
1428                 ret = ufshpb_evict_region(hpb, rgn);
1429                 if (ret) {
1430                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1431                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1432                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1433                 }
1434
1435                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1436         }
1437
1438         list_splice(&pending_list, &hpb->lh_inact_rgn);
1439         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1440 }
1441
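/*
 * Normalization worker (host control mode): right-shift each sub-region's
 * read counter by the normalization factor, recompute the region totals,
 * and queue for inactivation any active region whose read count drops to
 * zero.
 */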
1442 static void ufshpb_normalization_work_handler(struct work_struct *work)
1443 {
1444         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1445                                              ufshpb_normalization_work);
1446         int rgn_idx;
1447         u8 factor = hpb->params.normalization_factor;
1448
1449         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1450                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1451                 int srgn_idx;
1452
1453                 spin_lock(&rgn->rgn_lock);
1454                 rgn->reads = 0;
1455                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1456                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1457
1458                         srgn->reads >>= factor;
1459                         rgn->reads += srgn->reads;
1460                 }
1461                 spin_unlock(&rgn->rgn_lock);
1462
1463                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1464                         continue;
1465
1466                 /* if region is active but has no reads - inactivate it */
1467                 spin_lock(&hpb->rsp_list_lock);
1468                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1469                 spin_unlock(&hpb->rsp_list_lock);
1470         }
1471 }
1472
1473 static void ufshpb_map_work_handler(struct work_struct *work)
1474 {
1475         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1476
1477         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1478                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1479                            "%s: ufshpb state is not PRESENT\n", __func__);
1480                 return;
1481         }
1482
1483         ufshpb_run_inactive_region_list(hpb);
1484         ufshpb_run_active_subregion_list(hpb);
1485 }
1486
1487 /*
1488  * This function does not need to hold any locks (rgn_state_lock,
1489  * rsp_list_lock, etc.) because it is only called during initialization.
1490  */
1491 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1492                                             struct ufshpb_lu *hpb,
1493                                             struct ufshpb_region *rgn)
1494 {
1495         struct ufshpb_subregion *srgn;
1496         int srgn_idx, i;
1497         int err = 0;
1498
1499         for_each_sub_region(rgn, srgn_idx, srgn) {
1500                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1501                 srgn->srgn_state = HPB_SRGN_INVALID;
1502                 if (!srgn->mctx) {
1503                         err = -ENOMEM;
1504                         dev_err(hba->dev,
1505                                 "alloc mctx for pinned region failed\n");
1506                         goto release;
1507                 }
1508
1509                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1510         }
1511
1512         rgn->rgn_state = HPB_RGN_PINNED;
1513         return 0;
1514
1515 release:
1516         for (i = 0; i < srgn_idx; i++) {
1517                 srgn = rgn->srgn_tbl + i;
1518                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1519         }
1520         return err;
1521 }
1522
1523 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1524                                       struct ufshpb_region *rgn, bool last)
1525 {
1526         int srgn_idx;
1527         struct ufshpb_subregion *srgn;
1528
1529         for_each_sub_region(rgn, srgn_idx, srgn) {
1530                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1531
1532                 srgn->rgn_idx = rgn->rgn_idx;
1533                 srgn->srgn_idx = srgn_idx;
1534                 srgn->srgn_state = HPB_SRGN_UNUSED;
1535         }
1536
1537         if (unlikely(last && hpb->last_srgn_entries))
1538                 srgn->is_last = true;
1539 }
1540
1541 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1542                                       struct ufshpb_region *rgn, int srgn_cnt)
1543 {
1544         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1545                                  GFP_KERNEL);
1546         if (!rgn->srgn_tbl)
1547                 return -ENOMEM;
1548
1549         rgn->srgn_cnt = srgn_cnt;
1550         return 0;
1551 }
1552
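/*
 * Derive the per-LU HPB geometry from the descriptor values: rgn_mem_size and
 * srgn_mem_size are the number of bytes of HPB entries needed to map one
 * region and one sub-region respectively (HPB_ENTRY_SIZE bytes per
 * HPB_ENTRY_BLOCK_SIZE of mapped space). From these the entries-per-(sub)region
 * shifts/masks and the number of regions and sub-regions covering the LU are
 * computed.
 */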
1553 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1554                                      struct ufshpb_lu *hpb,
1555                                      struct ufshpb_dev_info *hpb_dev_info,
1556                                      struct ufshpb_lu_info *hpb_lu_info)
1557 {
1558         u32 entries_per_rgn;
1559         u64 rgn_mem_size, tmp;
1560
1561         if (ufshpb_is_legacy(hba))
1562                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1563         else
1564                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1565
1566         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1567         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1568                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1569                 : PINNED_NOT_SET;
1570         hpb->lru_info.max_lru_active_cnt =
1571                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1572
1573         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1574                         * HPB_ENTRY_SIZE;
1575         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1576         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1577                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1578
1579         tmp = rgn_mem_size;
1580         do_div(tmp, HPB_ENTRY_SIZE);
1581         entries_per_rgn = (u32)tmp;
1582         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1583         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1584
1585         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1586         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1587         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1588
1589         tmp = rgn_mem_size;
1590         do_div(tmp, hpb->srgn_mem_size);
1591         hpb->srgns_per_rgn = (int)tmp;
1592
1593         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1594                                 entries_per_rgn);
1595         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1596                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1597         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1598                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1599
1600         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1601
1602         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1603                 hpb->is_hcm = true;
1604 }
1605
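/*
 * Allocate and initialize the region table for this LU. Every region gets its
 * own sub-region table (the last region may contain fewer sub-regions);
 * pinned regions are activated right away, all other regions start out
 * HPB_RGN_INACTIVE.
 */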
1606 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1607 {
1608         struct ufshpb_region *rgn_table, *rgn;
1609         int rgn_idx, i;
1610         int ret = 0;
1611
1612         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1613                             GFP_KERNEL);
1614         if (!rgn_table)
1615                 return -ENOMEM;
1616
1617         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1618                 int srgn_cnt = hpb->srgns_per_rgn;
1619                 bool last_srgn = false;
1620
1621                 rgn = rgn_table + rgn_idx;
1622                 rgn->rgn_idx = rgn_idx;
1623
1624                 spin_lock_init(&rgn->rgn_lock);
1625
1626                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1627                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1628                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1629
1630                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1631                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1632                                     hpb->srgns_per_rgn) + 1;
1633                         last_srgn = true;
1634                 }
1635
1636                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1637                 if (ret)
1638                         goto release_srgn_table;
1639                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1640
1641                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1642                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1643                         if (ret)
1644                                 goto release_srgn_table;
1645                 } else {
1646                         rgn->rgn_state = HPB_RGN_INACTIVE;
1647                 }
1648
1649                 rgn->rgn_flags = 0;
1650                 rgn->hpb = hpb;
1651         }
1652
1653         hpb->rgn_tbl = rgn_table;
1654
1655         return 0;
1656
1657 release_srgn_table:
1658         for (i = 0; i <= rgn_idx; i++)
1659                 kvfree(rgn_table[i].srgn_tbl);
1660
1661         kvfree(rgn_table);
1662         return ret;
1663 }
1664
1665 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1666                                          struct ufshpb_region *rgn)
1667 {
1668         int srgn_idx;
1669         struct ufshpb_subregion *srgn;
1670
1671         for_each_sub_region(rgn, srgn_idx, srgn)
1672                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1673                         srgn->srgn_state = HPB_SRGN_UNUSED;
1674                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1675                 }
1676 }
1677
1678 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1679 {
1680         int rgn_idx;
1681
1682         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1683                 struct ufshpb_region *rgn;
1684
1685                 rgn = hpb->rgn_tbl + rgn_idx;
1686                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1687                         rgn->rgn_state = HPB_RGN_INACTIVE;
1688
1689                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1690                 }
1691
1692                 kvfree(rgn->srgn_tbl);
1693         }
1694
1695         kvfree(hpb->rgn_tbl);
1696 }
1697
1698 /* SYSFS functions - HPB statistics */
1699 #define ufshpb_sysfs_attr_show_func(__name)                             \
1700 static ssize_t __name##_show(struct device *dev,                        \
1701         struct device_attribute *attr, char *buf)                       \
1702 {                                                                       \
1703         struct scsi_device *sdev = to_scsi_device(dev);                 \
1704         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1705                                                                         \
1706         if (!hpb)                                                       \
1707                 return -ENODEV;                                         \
1708                                                                         \
1709         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1710 }                                                                       \
1711 \
1712 static DEVICE_ATTR_RO(__name)
1713
1714 ufshpb_sysfs_attr_show_func(hit_cnt);
1715 ufshpb_sysfs_attr_show_func(miss_cnt);
1716 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
1717 ufshpb_sysfs_attr_show_func(rb_active_cnt);
1718 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
1719 ufshpb_sysfs_attr_show_func(map_req_cnt);
1720 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1721
1722 static struct attribute *hpb_dev_stat_attrs[] = {
1723         &dev_attr_hit_cnt.attr,
1724         &dev_attr_miss_cnt.attr,
1725         &dev_attr_rb_noti_cnt.attr,
1726         &dev_attr_rb_active_cnt.attr,
1727         &dev_attr_rb_inactive_cnt.attr,
1728         &dev_attr_map_req_cnt.attr,
1729         &dev_attr_umap_req_cnt.attr,
1730         NULL,
1731 };
1732
1733 struct attribute_group ufs_sysfs_hpb_stat_group = {
1734         .name = "hpb_stats",
1735         .attrs = hpb_dev_stat_attrs,
1736 };
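/*
 * The counters above are exported read-only through the "hpb_stats" sysfs
 * group of the scsi_device, e.g. (illustrative path):
 *   cat /sys/class/scsi_device/<h:c:t:l>/device/hpb_stats/hit_cnt
 */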
1737
1738 /* SYSFS functions - HPB parameters */
1739 #define ufshpb_sysfs_param_show_func(__name)                            \
1740 static ssize_t __name##_show(struct device *dev,                        \
1741         struct device_attribute *attr, char *buf)                       \
1742 {                                                                       \
1743         struct scsi_device *sdev = to_scsi_device(dev);                 \
1744         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1745                                                                         \
1746         if (!hpb)                                                       \
1747                 return -ENODEV;                                         \
1748                                                                         \
1749         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1750 }
1751
1752 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1753 static ssize_t
1754 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1755                          const char *buf, size_t count)
1756 {
1757         struct scsi_device *sdev = to_scsi_device(dev);
1758         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1759         int val;
1760
1761         if (!hpb)
1762                 return -ENODEV;
1763
1764         if (kstrtouint(buf, 0, &val))
1765                 return -EINVAL;
1766
1767         if (val < 0)
1768                 return -EINVAL;
1769
1770         hpb->params.requeue_timeout_ms = val;
1771
1772         return count;
1773 }
1774 static DEVICE_ATTR_RW(requeue_timeout_ms);
1775
1776 ufshpb_sysfs_param_show_func(activation_thld);
1777 static ssize_t
1778 activation_thld_store(struct device *dev, struct device_attribute *attr,
1779                       const char *buf, size_t count)
1780 {
1781         struct scsi_device *sdev = to_scsi_device(dev);
1782         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1783         int val;
1784
1785         if (!hpb)
1786                 return -ENODEV;
1787
1788         if (!hpb->is_hcm)
1789                 return -EOPNOTSUPP;
1790
1791         if (kstrtouint(buf, 0, &val))
1792                 return -EINVAL;
1793
1794         if (val <= 0)
1795                 return -EINVAL;
1796
1797         hpb->params.activation_thld = val;
1798
1799         return count;
1800 }
1801 static DEVICE_ATTR_RW(activation_thld);
1802
1803 ufshpb_sysfs_param_show_func(normalization_factor);
1804 static ssize_t
1805 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1806                            const char *buf, size_t count)
1807 {
1808         struct scsi_device *sdev = to_scsi_device(dev);
1809         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1810         int val;
1811
1812         if (!hpb)
1813                 return -ENODEV;
1814
1815         if (!hpb->is_hcm)
1816                 return -EOPNOTSUPP;
1817
1818         if (kstrtouint(buf, 0, &val))
1819                 return -EINVAL;
1820
1821         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1822                 return -EINVAL;
1823
1824         hpb->params.normalization_factor = val;
1825
1826         return count;
1827 }
1828 static DEVICE_ATTR_RW(normalization_factor);
1829
1830 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1831 static ssize_t
1832 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1833                           const char *buf, size_t count)
1834 {
1835         struct scsi_device *sdev = to_scsi_device(dev);
1836         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1837         int val;
1838
1839         if (!hpb)
1840                 return -ENODEV;
1841
1842         if (!hpb->is_hcm)
1843                 return -EOPNOTSUPP;
1844
1845         if (kstrtouint(buf, 0, &val))
1846                 return -EINVAL;
1847
1848         if (val <= hpb->params.eviction_thld_exit)
1849                 return -EINVAL;
1850
1851         hpb->params.eviction_thld_enter = val;
1852
1853         return count;
1854 }
1855 static DEVICE_ATTR_RW(eviction_thld_enter);
1856
1857 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1858 static ssize_t
1859 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1860                          const char *buf, size_t count)
1861 {
1862         struct scsi_device *sdev = to_scsi_device(dev);
1863         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1864         int val;
1865
1866         if (!hpb)
1867                 return -ENODEV;
1868
1869         if (!hpb->is_hcm)
1870                 return -EOPNOTSUPP;
1871
1872         if (kstrtouint(buf, 0, &val))
1873                 return -EINVAL;
1874
1875         if (val <= hpb->params.activation_thld)
1876                 return -EINVAL;
1877
1878         hpb->params.eviction_thld_exit = val;
1879
1880         return count;
1881 }
1882 static DEVICE_ATTR_RW(eviction_thld_exit);
1883
1884 ufshpb_sysfs_param_show_func(read_timeout_ms);
1885 static ssize_t
1886 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1887                       const char *buf, size_t count)
1888 {
1889         struct scsi_device *sdev = to_scsi_device(dev);
1890         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1891         int val;
1892
1893         if (!hpb)
1894                 return -ENODEV;
1895
1896         if (!hpb->is_hcm)
1897                 return -EOPNOTSUPP;
1898
1899         if (kstrtouint(buf, 0, &val))
1900                 return -EINVAL;
1901
1902         /* read_timeout must be at least twice the polling interval */
1903         if (val < hpb->params.timeout_polling_interval_ms * 2)
1904                 return -EINVAL;
1905
1906         hpb->params.read_timeout_ms = val;
1907
1908         return count;
1909 }
1910 static DEVICE_ATTR_RW(read_timeout_ms);
1911
1912 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1913 static ssize_t
1914 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1915                             const char *buf, size_t count)
1916 {
1917         struct scsi_device *sdev = to_scsi_device(dev);
1918         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1919         int val;
1920
1921         if (!hpb)
1922                 return -ENODEV;
1923
1924         if (!hpb->is_hcm)
1925                 return -EOPNOTSUPP;
1926
1927         if (kstrtouint(buf, 0, &val))
1928                 return -EINVAL;
1929
1930         if (val <= 0)
1931                 return -EINVAL;
1932
1933         hpb->params.read_timeout_expiries = val;
1934
1935         return count;
1936 }
1937 static DEVICE_ATTR_RW(read_timeout_expiries);
1938
1939 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1940 static ssize_t
1941 timeout_polling_interval_ms_store(struct device *dev,
1942                                   struct device_attribute *attr,
1943                                   const char *buf, size_t count)
1944 {
1945         struct scsi_device *sdev = to_scsi_device(dev);
1946         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1947         int val;
1948
1949         if (!hpb)
1950                 return -ENODEV;
1951
1952         if (!hpb->is_hcm)
1953                 return -EOPNOTSUPP;
1954
1955         if (kstrtouint(buf, 0, &val))
1956                 return -EINVAL;
1957
1958         /* the polling interval must be at most half of read_timeout */
1959         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
1960                 return -EINVAL;
1961
1962         hpb->params.timeout_polling_interval_ms = val;
1963
1964         return count;
1965 }
1966 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
1967
1968 ufshpb_sysfs_param_show_func(inflight_map_req);
1969 static ssize_t inflight_map_req_store(struct device *dev,
1970                                       struct device_attribute *attr,
1971                                       const char *buf, size_t count)
1972 {
1973         struct scsi_device *sdev = to_scsi_device(dev);
1974         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1975         int val;
1976
1977         if (!hpb)
1978                 return -ENODEV;
1979
1980         if (!hpb->is_hcm)
1981                 return -EOPNOTSUPP;
1982
1983         if (kstrtouint(buf, 0, &val))
1984                 return -EINVAL;
1985
1986         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
1987                 return -EINVAL;
1988
1989         hpb->params.inflight_map_req = val;
1990
1991         return count;
1992 }
1993 static DEVICE_ATTR_RW(inflight_map_req);
1994
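/*
 * Default tunables for host control mode; each of them can be changed at
 * runtime through the "hpb_params" sysfs group defined below.
 */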
1995 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
1996 {
1997         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
1998         hpb->params.normalization_factor = 1;
1999         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2000         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2001         hpb->params.read_timeout_ms = READ_TO_MS;
2002         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2003         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2004         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2005 }
2006
2007 static struct attribute *hpb_dev_param_attrs[] = {
2008         &dev_attr_requeue_timeout_ms.attr,
2009         &dev_attr_activation_thld.attr,
2010         &dev_attr_normalization_factor.attr,
2011         &dev_attr_eviction_thld_enter.attr,
2012         &dev_attr_eviction_thld_exit.attr,
2013         &dev_attr_read_timeout_ms.attr,
2014         &dev_attr_read_timeout_expiries.attr,
2015         &dev_attr_timeout_polling_interval_ms.attr,
2016         &dev_attr_inflight_map_req.attr,
2017         NULL,
2018 };
2019
2020 struct attribute_group ufs_sysfs_hpb_param_group = {
2021         .name = "hpb_params",
2022         .attrs = hpb_dev_param_attrs,
2023 };
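/*
 * The parameters above live in the "hpb_params" sysfs group of the
 * scsi_device and may be tuned at runtime, e.g. (illustrative path):
 *   echo 16 > /sys/class/scsi_device/<h:c:t:l>/device/hpb_params/activation_thld
 */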
2024
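/*
 * Pre-allocate the pool of pre-request descriptors, sized to half of the LU
 * queue depth; each descriptor carries a bio and a zeroed page used as the
 * pre-request data buffer.
 */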
2025 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2026 {
2027         struct ufshpb_req *pre_req = NULL, *t;
2028         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2029         int i;
2030
2031         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2032
2033         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2034         hpb->throttle_pre_req = qd;
2035         hpb->num_inflight_pre_req = 0;
2036
2037         if (!hpb->pre_req)
2038                 goto release_mem;
2039
2040         for (i = 0; i < qd; i++) {
2041                 pre_req = hpb->pre_req + i;
2042                 INIT_LIST_HEAD(&pre_req->list_req);
2043                 pre_req->req = NULL;
2044
2045                 pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
2046                 if (!pre_req->bio)
2047                         goto release_mem;
2048
2049                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2050                 if (!pre_req->wb.m_page) {
2051                         bio_put(pre_req->bio);
2052                         goto release_mem;
2053                 }
2054
2055                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2056         }
2057
2058         return 0;
2059 release_mem:
2060         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2061                 list_del_init(&pre_req->list_req);
2062                 bio_put(pre_req->bio);
2063                 __free_page(pre_req->wb.m_page);
2064         }
2065
2066         kfree(hpb->pre_req);
2067         return -ENOMEM;
2068 }
2069
2070 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2071 {
2072         struct ufshpb_req *pre_req = NULL;
2073         int i;
2074
2075         for (i = 0; i < hpb->throttle_pre_req; i++) {
2076                 pre_req = hpb->pre_req + i;
2077                 bio_put(hpb->pre_req[i].bio);
2078                 if (pre_req->wb.m_page)
2079                         __free_page(hpb->pre_req[i].wb.m_page);
2080                 list_del_init(&pre_req->list_req);
2081         }
2082
2083         kfree(hpb->pre_req);
2084 }
2085
2086 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2087 {
2088         hpb->stats.hit_cnt = 0;
2089         hpb->stats.miss_cnt = 0;
2090         hpb->stats.rb_noti_cnt = 0;
2091         hpb->stats.rb_active_cnt = 0;
2092         hpb->stats.rb_inactive_cnt = 0;
2093         hpb->stats.map_req_cnt = 0;
2094         hpb->stats.umap_req_cnt = 0;
2095 }
2096
2097 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2098 {
2099         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2100         if (hpb->is_hcm)
2101                 ufshpb_hcm_param_init(hpb);
2102 }
2103
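/*
 * Per-LU initialization: set up locks, lists and work items, create the
 * map-request and map-page caches, the pre-request pool and the region table,
 * and reset the statistics and parameters. For HCM the read-timeout poller is
 * started here as well.
 */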
2104 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2105 {
2106         int ret;
2107
2108         spin_lock_init(&hpb->rgn_state_lock);
2109         spin_lock_init(&hpb->rsp_list_lock);
2110         spin_lock_init(&hpb->param_lock);
2111
2112         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2113         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2114         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2115         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2116
2117         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2118         if (hpb->is_hcm) {
2119                 INIT_WORK(&hpb->ufshpb_normalization_work,
2120                           ufshpb_normalization_work_handler);
2121                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2122                                   ufshpb_read_to_handler);
2123         }
2124
2125         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2126                           sizeof(struct ufshpb_req), 0, 0, NULL);
2127         if (!hpb->map_req_cache) {
2128                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2129                         hpb->lun);
2130                 return -ENOMEM;
2131         }
2132
2133         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2134                           sizeof(struct page *) * hpb->pages_per_srgn,
2135                           0, 0, NULL);
2136         if (!hpb->m_page_cache) {
2137                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2138                         hpb->lun);
2139                 ret = -ENOMEM;
2140                 goto release_req_cache;
2141         }
2142
2143         ret = ufshpb_pre_req_mempool_init(hpb);
2144         if (ret) {
2145                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2146                         hpb->lun);
2147                 goto release_m_page_cache;
2148         }
2149
2150         ret = ufshpb_alloc_region_tbl(hba, hpb);
2151         if (ret)
2152                 goto release_pre_req_mempool;
2153
2154         ufshpb_stat_init(hpb);
2155         ufshpb_param_init(hpb);
2156
2157         if (hpb->is_hcm) {
2158                 unsigned int poll;
2159
2160                 poll = hpb->params.timeout_polling_interval_ms;
2161                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2162                                       msecs_to_jiffies(poll));
2163         }
2164
2165         return 0;
2166
2167 release_pre_req_mempool:
2168         ufshpb_pre_req_mempool_destroy(hpb);
2169 release_m_page_cache:
2170         kmem_cache_destroy(hpb->m_page_cache);
2171 release_req_cache:
2172         kmem_cache_destroy(hpb->map_req_cache);
2173         return ret;
2174 }
2175
2176 static struct ufshpb_lu *
2177 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2178                     struct ufshpb_dev_info *hpb_dev_info,
2179                     struct ufshpb_lu_info *hpb_lu_info)
2180 {
2181         struct ufshpb_lu *hpb;
2182         int ret;
2183
2184         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2185         if (!hpb)
2186                 return NULL;
2187
2188         hpb->lun = sdev->lun;
2189         hpb->sdev_ufs_lu = sdev;
2190
2191         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2192
2193         ret = ufshpb_lu_hpb_init(hba, hpb);
2194         if (ret) {
2195                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2196                 goto release_hpb;
2197         }
2198
2199         sdev->hostdata = hpb;
2200         return hpb;
2201
2202 release_hpb:
2203         kfree(hpb);
2204         return NULL;
2205 }
2206
2207 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2208 {
2209         struct ufshpb_region *rgn, *next_rgn;
2210         struct ufshpb_subregion *srgn, *next_srgn;
2211         unsigned long flags;
2212
2213         /*
2214          * If a device reset occurred, the remaining HPB region information
2215          * may be stale. Discard the HPB response lists left over from
2216          * before the reset so that no unnecessary work is done on them.
2217          */
2218         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2219         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2220                                  list_inact_rgn)
2221                 list_del_init(&rgn->list_inact_rgn);
2222
2223         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2224                                  list_act_srgn)
2225                 list_del_init(&srgn->list_act_srgn);
2226         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2227 }
2228
2229 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2230 {
2231         if (hpb->is_hcm) {
2232                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2233                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2234         }
2235         cancel_work_sync(&hpb->map_work);
2236 }
2237
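/*
 * Poll the fHpbReset flag until the device clears it, giving up after
 * HPB_RESET_REQ_RETRIES attempts. Returns the final flag value, i.e. false
 * when the device completed the HPB reset.
 */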
2238 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2239 {
2240         int err = 0;
2241         bool flag_res = true;
2242         int try;
2243
2244         /* wait for the device to complete HPB reset query */
2245         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2246                 dev_dbg(hba->dev,
2247                         "%s: polling fHpbReset flag, attempt %d\n",
2248                         __func__, try);
2249
2250                 /* Poll fHpbReset flag to be cleared */
2251                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2252                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2253
2254                 if (err) {
2255                         dev_err(hba->dev,
2256                                 "%s reading fHpbReset flag failed with error %d\n",
2257                                 __func__, err);
2258                         return flag_res;
2259                 }
2260
2261                 if (!flag_res)
2262                         goto out;
2263
2264                 usleep_range(1000, 1100);
2265         }
2266         if (flag_res) {
2267                 dev_err(hba->dev,
2268                         "%s fHpbReset was not cleared by the device\n",
2269                         __func__);
2270         }
2271 out:
2272         return flag_res;
2273 }
2274
2275 void ufshpb_reset(struct ufs_hba *hba)
2276 {
2277         struct ufshpb_lu *hpb;
2278         struct scsi_device *sdev;
2279
2280         shost_for_each_device(sdev, hba->host) {
2281                 hpb = ufshpb_get_hpb_data(sdev);
2282                 if (!hpb)
2283                         continue;
2284
2285                 if (ufshpb_get_state(hpb) != HPB_RESET)
2286                         continue;
2287
2288                 ufshpb_set_state(hpb, HPB_PRESENT);
2289         }
2290 }
2291
2292 void ufshpb_reset_host(struct ufs_hba *hba)
2293 {
2294         struct ufshpb_lu *hpb;
2295         struct scsi_device *sdev;
2296
2297         shost_for_each_device(sdev, hba->host) {
2298                 hpb = ufshpb_get_hpb_data(sdev);
2299                 if (!hpb)
2300                         continue;
2301
2302                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2303                         continue;
2304                 ufshpb_set_state(hpb, HPB_RESET);
2305                 ufshpb_cancel_jobs(hpb);
2306                 ufshpb_discard_rsp_lists(hpb);
2307         }
2308 }
2309
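/*
 * Power management hooks: on suspend, mark every HPB LU as HPB_SUSPEND and
 * stop its background work; on resume, restore HPB_PRESENT, kick the map work
 * and, for HCM, restart the read-timeout poller.
 */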
2310 void ufshpb_suspend(struct ufs_hba *hba)
2311 {
2312         struct ufshpb_lu *hpb;
2313         struct scsi_device *sdev;
2314
2315         shost_for_each_device(sdev, hba->host) {
2316                 hpb = ufshpb_get_hpb_data(sdev);
2317                 if (!hpb)
2318                         continue;
2319
2320                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2321                         continue;
2322                 ufshpb_set_state(hpb, HPB_SUSPEND);
2323                 ufshpb_cancel_jobs(hpb);
2324         }
2325 }
2326
2327 void ufshpb_resume(struct ufs_hba *hba)
2328 {
2329         struct ufshpb_lu *hpb;
2330         struct scsi_device *sdev;
2331
2332         shost_for_each_device(sdev, hba->host) {
2333                 hpb = ufshpb_get_hpb_data(sdev);
2334                 if (!hpb)
2335                         continue;
2336
2337                 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2338                     (ufshpb_get_state(hpb) != HPB_SUSPEND))
2339                         continue;
2340                 ufshpb_set_state(hpb, HPB_PRESENT);
2341                 ufshpb_kick_map_work(hpb);
2342                 if (hpb->is_hcm) {
2343                         unsigned int poll =
2344                                 hpb->params.timeout_polling_interval_ms;
2345
2346                         schedule_delayed_work(&hpb->ufshpb_read_to_work,
2347                                 msecs_to_jiffies(poll));
2348                 }
2349         }
2350 }
2351
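/*
 * Read the unit descriptor of @lun and extract its HPB parameters (logical
 * block count, pinned region range, maximum active regions). Returns -ENODEV
 * if HPB is not enabled for this LU.
 */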
2352 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2353                               struct ufshpb_lu_info *hpb_lu_info)
2354 {
2355         u16 max_active_rgns;
2356         u8 lu_enable;
2357         int size;
2358         int ret;
2359         char desc_buf[QUERY_DESC_MAX_SIZE];
2360
2361         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2362
2363         ufshcd_rpm_get_sync(hba);
2364         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2365                                             QUERY_DESC_IDN_UNIT, lun, 0,
2366                                             desc_buf, &size);
2367         ufshcd_rpm_put_sync(hba);
2368
2369         if (ret) {
2370                 dev_err(hba->dev,
2371                         "%s: idn: %d lun: %d  query request failed",
2372                         __func__, QUERY_DESC_IDN_UNIT, lun);
2373                 return ret;
2374         }
2375
2376         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2377         if (lu_enable != LU_ENABLED_HPB_FUNC)
2378                 return -ENODEV;
2379
2380         max_active_rgns = get_unaligned_be16(
2381                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2382         if (!max_active_rgns) {
2383                 dev_err(hba->dev,
2384                         "lun %d wrong number of max active regions\n", lun);
2385                 return -ENODEV;
2386         }
2387
2388         hpb_lu_info->num_blocks = get_unaligned_be64(
2389                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2390         hpb_lu_info->pinned_start = get_unaligned_be16(
2391                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2392         hpb_lu_info->num_pinned = get_unaligned_be16(
2393                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2394         hpb_lu_info->max_active_rgns = max_active_rgns;
2395
2396         return 0;
2397 }
2398
2399 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2400 {
2401         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2402
2403         if (!hpb)
2404                 return;
2405
2406         ufshpb_set_state(hpb, HPB_FAILED);
2407
2408         sdev = hpb->sdev_ufs_lu;
2409         sdev->hostdata = NULL;
2410
2411         ufshpb_cancel_jobs(hpb);
2412
2413         ufshpb_pre_req_mempool_destroy(hpb);
2414         ufshpb_destroy_region_tbl(hpb);
2415
2416         kmem_cache_destroy(hpb->map_req_cache);
2417         kmem_cache_destroy(hpb->m_page_cache);
2418
2419         list_del_init(&hpb->list_hpb_lu);
2420
2421         kfree(hpb);
2422 }
2423
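/*
 * Runs once every LU has been configured: shrink the global mempools when
 * they are larger than the active sub-region pages actually require, verify
 * that the device completed the HPB reset, and either bring each HPB LU to
 * HPB_PRESENT (kicking an initial map for pinned regions and, in device
 * control mode, issuing an unmap-all request) or tear everything down again.
 */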
2424 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2425 {
2426         int pool_size;
2427         struct ufshpb_lu *hpb;
2428         struct scsi_device *sdev;
2429         bool init_success;
2430
2431         if (tot_active_srgn_pages == 0) {
2432                 ufshpb_remove(hba);
2433                 return;
2434         }
2435
2436         init_success = !ufshpb_check_hpb_reset_query(hba);
2437
2438         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2439         if (pool_size > tot_active_srgn_pages) {
2440                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2441                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2442         }
2443
2444         shost_for_each_device(sdev, hba->host) {
2445                 hpb = ufshpb_get_hpb_data(sdev);
2446                 if (!hpb)
2447                         continue;
2448
2449                 if (init_success) {
2450                         ufshpb_set_state(hpb, HPB_PRESENT);
2451                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2452                                 queue_work(ufshpb_wq, &hpb->map_work);
2453                         if (!hpb->is_hcm)
2454                                 ufshpb_issue_umap_all_req(hpb);
2455                 } else {
2456                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2457                         ufshpb_destroy_lu(hba, sdev);
2458                 }
2459         }
2460
2461         if (!init_success)
2462                 ufshpb_remove(hba);
2463 }
2464
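/*
 * Called for every scsi_device while the LUs are being configured: read the
 * unit descriptor and, if the LU has HPB enabled, allocate its ufshpb_lu.
 * When the last LU has been handled (slave_conf_cnt reaches zero), finish the
 * device-wide setup in ufshpb_hpb_lu_prepared().
 */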
2465 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2466 {
2467         struct ufshpb_lu *hpb;
2468         int ret;
2469         struct ufshpb_lu_info hpb_lu_info = { 0 };
2470         int lun = sdev->lun;
2471
2472         if (lun >= hba->dev_info.max_lu_supported)
2473                 goto out;
2474
2475         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2476         if (ret)
2477                 goto out;
2478
2479         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2480                                   &hpb_lu_info);
2481         if (!hpb)
2482                 goto out;
2483
2484         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2485                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2486
2487 out:
2488         /* the last LU to be configured completes the device-wide HPB init */
2489         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2490                 ufshpb_hpb_lu_prepared(hba);
2491 }
2492
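/*
 * Set up the module-global resources: the map-context slab cache, the mctx
 * and page mempools sized from ufshpb_host_map_kbytes, and the unbound
 * workqueue used for HPB map work.
 */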
2493 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2494 {
2495         int ret;
2496         unsigned int pool_size;
2497
2498         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2499                                         sizeof(struct ufshpb_map_ctx),
2500                                         0, 0, NULL);
2501         if (!ufshpb_mctx_cache) {
2502                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2503                 return -ENOMEM;
2504         }
2505
2506         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2507         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2508                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2509
2510         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2511                                                     ufshpb_mctx_cache);
2512         if (!ufshpb_mctx_pool) {
2513                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2514                 ret = -ENOMEM;
2515                 goto release_mctx_cache;
2516         }
2517
2518         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2519         if (!ufshpb_page_pool) {
2520                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2521                 ret = -ENOMEM;
2522                 goto release_mctx_pool;
2523         }
2524
2525         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2526                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2527         if (!ufshpb_wq) {
2528                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2529                 ret = -ENOMEM;
2530                 goto release_page_pool;
2531         }
2532
2533         return 0;
2534
2535 release_page_pool:
2536         mempool_destroy(ufshpb_page_pool);
2537 release_mctx_pool:
2538         mempool_destroy(ufshpb_mctx_pool);
2539 release_mctx_cache:
2540         kmem_cache_destroy(ufshpb_mctx_cache);
2541         return ret;
2542 }
2543
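/*
 * Parse the HPB fields of the geometry descriptor; HPB is disabled when the
 * device reports no HPB LUs, a zero region/sub-region size, or zero maximum
 * active regions.
 */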
2544 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2545 {
2546         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2547         int max_active_rgns = 0;
2548         int hpb_num_lu;
2549
2550         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2551         if (hpb_num_lu == 0) {
2552                 dev_err(hba->dev, "No HPB LU supported\n");
2553                 hpb_info->hpb_disabled = true;
2554                 return;
2555         }
2556
2557         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2558         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2559         max_active_rgns = get_unaligned_be16(geo_buf +
2560                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2561
2562         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2563             max_active_rgns == 0) {
2564                 dev_err(hba->dev, "No HPB supported device\n");
2565                 hpb_info->hpb_disabled = true;
2566                 return;
2567         }
2568 }
2569
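/*
 * Parse the HPB fields of the device descriptor: control mode, HPB version
 * (the 1.0 legacy version is flagged separately), the number of LUs and the
 * maximum HPB single-command chunk size.
 */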
2570 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2571 {
2572         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2573         int version, ret;
2574         int max_single_cmd;
2575
2576         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2577
2578         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2579         if ((version != HPB_SUPPORT_VERSION) &&
2580             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2581                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2582                         __func__, version);
2583                 hpb_dev_info->hpb_disabled = true;
2584                 return;
2585         }
2586
2587         if (version == HPB_SUPPORT_LEGACY_VERSION)
2588                 hpb_dev_info->is_legacy = true;
2589
2590         /*
2591          * Get the number of user logical units so we can check whether all
2592          * scsi_device instances have finished initialization.
2593          */
2594         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2595
2596         if (hpb_dev_info->is_legacy)
2597                 return;
2598
2599         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2600                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2601
2602         if (ret)
2603                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2604         else
2605                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2606 }
2607
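/*
 * Device-level HPB initialization: allocate the global memory pools and
 * workqueue, record how many LUs still need to be configured, and request an
 * HPB state reset from the device by setting the fHpbReset flag.
 */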
2608 void ufshpb_init(struct ufs_hba *hba)
2609 {
2610         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2611         int try;
2612         int ret;
2613
2614         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2615                 return;
2616
2617         if (ufshpb_init_mem_wq(hba)) {
2618                 hpb_dev_info->hpb_disabled = true;
2619                 return;
2620         }
2621
2622         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2623         tot_active_srgn_pages = 0;
2624         /* issue HPB reset query */
2625         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2626                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2627                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2628                 if (!ret)
2629                         break;
2630         }
2631 }
2632
2633 void ufshpb_remove(struct ufs_hba *hba)
2634 {
2635         mempool_destroy(ufshpb_page_pool);
2636         mempool_destroy(ufshpb_mctx_pool);
2637         kmem_cache_destroy(ufshpb_mctx_cache);
2638
2639         destroy_workqueue(ufshpb_wq);
2640 }
2641
2642 module_param(ufshpb_host_map_kbytes, uint, 0644);
2643 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2644         "ufshpb host mapping memory in kilobytes for the ufshpb memory pool");