drivers/scsi/ufs/ufshpb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
14
15 #include "ufshcd.h"
16 #include "ufshpb.h"
17 #include "../sd.h"
18
19 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
20 #define READ_TO_MS 1000
21 #define READ_TO_EXPIRIES 100
22 #define POLLING_INTERVAL_MS 200
23 #define THROTTLE_MAP_REQ_DEFAULT 1
24
25 /* memory management */
26 static struct kmem_cache *ufshpb_mctx_cache;
27 static mempool_t *ufshpb_mctx_pool;
28 static mempool_t *ufshpb_page_pool;
29 /* A 2MB cache can hold the ppn entries covering a 1GB LBA range. */
30 static unsigned int ufshpb_host_map_kbytes = 2048;
31 static int tot_active_srgn_pages;
32
33 static struct workqueue_struct *ufshpb_wq;
34
35 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
36                                       int srgn_idx);
37
38 bool ufshpb_is_allowed(struct ufs_hba *hba)
39 {
40         return !(hba->ufshpb_dev.hpb_disabled);
41 }
42
43 /* HPB version 1.0 is called the legacy version. */
44 bool ufshpb_is_legacy(struct ufs_hba *hba)
45 {
46         return hba->ufshpb_dev.is_legacy;
47 }
48
49 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
50 {
51         return sdev->hostdata;
52 }
53
54 static int ufshpb_get_state(struct ufshpb_lu *hpb)
55 {
56         return atomic_read(&hpb->hpb_state);
57 }
58
59 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
60 {
61         atomic_set(&hpb->hpb_state, state);
62 }
63
64 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
65                                 struct ufshpb_subregion *srgn)
66 {
67         return rgn->rgn_state != HPB_RGN_INACTIVE &&
68                 srgn->srgn_state == HPB_SRGN_VALID;
69 }
70
71 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
72 {
73         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
74 }
75
76 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
77 {
78         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
79                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
80 }
81
82 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
83 {
84         return transfer_len <= hpb->pre_req_max_tr_len;
85 }
86
87 static bool ufshpb_is_general_lun(int lun)
88 {
89         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
90 }
91
92 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
93 {
94         if (hpb->lu_pinned_end != PINNED_NOT_SET &&
95             rgn_idx >= hpb->lu_pinned_start &&
96             rgn_idx <= hpb->lu_pinned_end)
97                 return true;
98
99         return false;
100 }
101
102 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
103 {
104         bool ret = false;
105         unsigned long flags;
106
107         if (ufshpb_get_state(hpb) != HPB_PRESENT)
108                 return;
109
110         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
111         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
112                 ret = true;
113         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
114
115         if (ret)
116                 queue_work(ufshpb_wq, &hpb->map_work);
117 }
118
119 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
120                                     struct ufshcd_lrb *lrbp,
121                                     struct utp_hpb_rsp *rsp_field)
122 {
123         /* Check HPB_UPDATE_ALERT */
124         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
125               UPIU_HEADER_DWORD(0, 2, 0, 0)))
126                 return false;
127
128         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
129             rsp_field->desc_type != DEV_DES_TYPE ||
130             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
131             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
132             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
133             rsp_field->hpb_op == HPB_RSP_NONE ||
134             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
135              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
136                 return false;
137
138         if (!ufshpb_is_general_lun(rsp_field->lun)) {
139                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
140                          lrbp->lun);
141                 return false;
142         }
143
144         return true;
145 }
146
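/*
 * Walk the subregions covered by the range starting at
 * (rgn_idx, srgn_idx, srgn_offset) for @cnt entries. For writes and
 * discards (@set_dirty == true) the matching ppn_dirty bits are set for
 * valid subregions; in host control mode the per-(sub)region read
 * counters are reset (dirty) or incremented (reads), the read timer is
 * rewound, and a subregion is queued for activation once its read count
 * reaches the activation threshold.
 */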
147 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
148                                int srgn_offset, int cnt, bool set_dirty)
149 {
150         struct ufshpb_region *rgn;
151         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
152         int set_bit_len;
153         int bitmap_len;
154         unsigned long flags;
155
156 next_srgn:
157         rgn = hpb->rgn_tbl + rgn_idx;
158         srgn = rgn->srgn_tbl + srgn_idx;
159
160         if (likely(!srgn->is_last))
161                 bitmap_len = hpb->entries_per_srgn;
162         else
163                 bitmap_len = hpb->last_srgn_entries;
164
165         if ((srgn_offset + cnt) > bitmap_len)
166                 set_bit_len = bitmap_len - srgn_offset;
167         else
168                 set_bit_len = cnt;
169
170         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
171         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
172                 if (set_dirty) {
173                         if (srgn->srgn_state == HPB_SRGN_VALID)
174                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
175                                            set_bit_len);
176                 } else if (hpb->is_hcm) {
177                          /* rewind the read timer for lru regions */
178                         rgn->read_timeout = ktime_add_ms(ktime_get(),
179                                         rgn->hpb->params.read_timeout_ms);
180                         rgn->read_timeout_expiries =
181                                 rgn->hpb->params.read_timeout_expiries;
182                 }
183         }
184         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
185
186         if (hpb->is_hcm && prev_srgn != srgn) {
187                 bool activate = false;
188
189                 spin_lock(&rgn->rgn_lock);
190                 if (set_dirty) {
191                         rgn->reads -= srgn->reads;
192                         srgn->reads = 0;
193                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
194                 } else {
195                         srgn->reads++;
196                         rgn->reads++;
197                         if (srgn->reads == hpb->params.activation_thld)
198                                 activate = true;
199                 }
200                 spin_unlock(&rgn->rgn_lock);
201
202                 if (activate ||
203                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
204                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
205                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
206                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
207                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
208                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
209                 }
210
211                 prev_srgn = srgn;
212         }
213
214         srgn_offset = 0;
215         if (++srgn_idx == hpb->srgns_per_rgn) {
216                 srgn_idx = 0;
217                 rgn_idx++;
218         }
219
220         cnt -= set_bit_len;
221         if (cnt > 0)
222                 goto next_srgn;
223 }
224
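/*
 * Check whether any L2P entry in the given LPN range is unusable for an
 * HPB READ: the range touches an inactive/invalid subregion, a missing
 * mctx, or a dirty ppn entry. A true return means the cached entries
 * cannot be trusted and the request is treated as an HPB miss.
 */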
225 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
226                                   int srgn_idx, int srgn_offset, int cnt)
227 {
228         struct ufshpb_region *rgn;
229         struct ufshpb_subregion *srgn;
230         int bitmap_len;
231         int bit_len;
232
233 next_srgn:
234         rgn = hpb->rgn_tbl + rgn_idx;
235         srgn = rgn->srgn_tbl + srgn_idx;
236
237         if (likely(!srgn->is_last))
238                 bitmap_len = hpb->entries_per_srgn;
239         else
240                 bitmap_len = hpb->last_srgn_entries;
241
242         if (!ufshpb_is_valid_srgn(rgn, srgn))
243                 return true;
244
245         /*
246          * If the region state is active, mctx must be allocated.
247          * In this case, check whether the region has been evicted or
248          * the mctx allocation has failed.
249          */
250         if (unlikely(!srgn->mctx)) {
251                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
252                         "no mctx in region %d subregion %d.\n",
253                         srgn->rgn_idx, srgn->srgn_idx);
254                 return true;
255         }
256
257         if ((srgn_offset + cnt) > bitmap_len)
258                 bit_len = bitmap_len - srgn_offset;
259         else
260                 bit_len = cnt;
261
262         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
263                           srgn_offset) < bit_len + srgn_offset)
264                 return true;
265
266         srgn_offset = 0;
267         if (++srgn_idx == hpb->srgns_per_rgn) {
268                 srgn_idx = 0;
269                 rgn_idx++;
270         }
271
272         cnt -= bit_len;
273         if (cnt > 0)
274                 goto next_srgn;
275
276         return false;
277 }
278
279 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
280 {
281         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
282 }
283
284 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
285                                      struct ufshpb_map_ctx *mctx, int pos,
286                                      int len, __be64 *ppn_buf)
287 {
288         struct page *page;
289         int index, offset;
290         int copied;
291
292         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
293         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
294
295         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
296                 copied = len;
297         else
298                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
299
300         page = mctx->m_page[index];
301         if (unlikely(!page)) {
302                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
303                         "error. cannot find page in mctx\n");
304                 return -ENOMEM;
305         }
306
307         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
308                copied * HPB_ENTRY_SIZE);
309
310         return copied;
311 }
312
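/*
 * Translate an LPN into its region index, subregion index and the entry
 * offset within that subregion.
 */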
313 static void
314 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
315                         int *srgn_idx, int *offset)
316 {
317         int rgn_offset;
318
319         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
320         rgn_offset = lpn & hpb->entries_per_rgn_mask;
321         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
322         *offset = rgn_offset & hpb->entries_per_srgn_mask;
323 }
324
325 static void
326 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
327                             __be64 ppn, u8 transfer_len)
328 {
329         unsigned char *cdb = lrbp->cmd->cmnd;
330         __be64 ppn_tmp = ppn;
331         cdb[0] = UFSHPB_READ;
332
333         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
334                 ppn_tmp = (__force __be64)swab64((__force u64)ppn);
335
336         /* ppn value is stored as big-endian in the host memory */
337         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
338         cdb[14] = transfer_len;
339         cdb[15] = 0;
340
341         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
342 }
343
344 /*
345  * This function sets up an HPB READ command using host-side L2P map data.
346  */
347 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
348 {
349         struct ufshpb_lu *hpb;
350         struct ufshpb_region *rgn;
351         struct ufshpb_subregion *srgn;
352         struct scsi_cmnd *cmd = lrbp->cmd;
353         u32 lpn;
354         __be64 ppn;
355         unsigned long flags;
356         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
357         int err = 0;
358
359         hpb = ufshpb_get_hpb_data(cmd->device);
360         if (!hpb)
361                 return -ENODEV;
362
363         if (ufshpb_get_state(hpb) == HPB_INIT)
364                 return -ENODEV;
365
366         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
367                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
368                            "%s: ufshpb state is not PRESENT", __func__);
369                 return -ENODEV;
370         }
371
372         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
373             (!ufshpb_is_write_or_discard(cmd) &&
374              !ufshpb_is_read_cmd(cmd)))
375                 return 0;
376
377         transfer_len = sectors_to_logical(cmd->device,
378                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
379         if (unlikely(!transfer_len))
380                 return 0;
381
382         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
383         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
384         rgn = hpb->rgn_tbl + rgn_idx;
385         srgn = rgn->srgn_tbl + srgn_idx;
386
387         /* If the command is a WRITE or DISCARD, mark the bitmap as dirty */
388         if (ufshpb_is_write_or_discard(cmd)) {
389                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
390                                    transfer_len, true);
391                 return 0;
392         }
393
394         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
395                 return 0;
396
397         if (hpb->is_hcm) {
398                 /*
399                  * in host control mode, reads are the main source for
400                  * activation trials.
401                  */
402                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
403                                    transfer_len, false);
404
405                 /* keep those counters normalized */
406                 if (rgn->reads > hpb->entries_per_srgn)
407                         schedule_work(&hpb->ufshpb_normalization_work);
408         }
409
410         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
411         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
412                                    transfer_len)) {
413                 hpb->stats.miss_cnt++;
414                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
415                 return 0;
416         }
417
418         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
419         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
420         if (unlikely(err < 0)) {
421                 /*
422                  * In this case, the region state is active,
423                  * but the ppn table is not allocated.
424                  * The ppn table must always be allocated while the
425                  * region is in the active state.
426                  */
427                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
428                 return err;
429         }
430
431         ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
432
433         hpb->stats.hit_cnt++;
434         return 0;
435 }
436
437 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
438                                          int rgn_idx, enum req_opf dir,
439                                          bool atomic)
440 {
441         struct ufshpb_req *rq;
442         struct request *req;
443         int retries = HPB_MAP_REQ_RETRIES;
444
445         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
446         if (!rq)
447                 return NULL;
448
449 retry:
450         req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
451                               BLK_MQ_REQ_NOWAIT);
452
453         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
454                 usleep_range(3000, 3100);
455                 goto retry;
456         }
457
458         if (IS_ERR(req))
459                 goto free_rq;
460
461         rq->hpb = hpb;
462         rq->req = req;
463         rq->rb.rgn_idx = rgn_idx;
464
465         return rq;
466
467 free_rq:
468         kmem_cache_free(hpb->map_req_cache, rq);
469         return NULL;
470 }
471
472 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
473 {
474         blk_mq_free_request(rq->req);
475         kmem_cache_free(hpb->map_req_cache, rq);
476 }
477
478 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
479                                              struct ufshpb_subregion *srgn)
480 {
481         struct ufshpb_req *map_req;
482         struct bio *bio;
483         unsigned long flags;
484
485         if (hpb->is_hcm &&
486             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
487                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
488                          "map_req throttle. inflight %d throttle %d",
489                          hpb->num_inflight_map_req,
490                          hpb->params.inflight_map_req);
491                 return NULL;
492         }
493
494         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
495         if (!map_req)
496                 return NULL;
497
498         bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
499         if (!bio) {
500                 ufshpb_put_req(hpb, map_req);
501                 return NULL;
502         }
503
504         map_req->bio = bio;
505
506         map_req->rb.srgn_idx = srgn->srgn_idx;
507         map_req->rb.mctx = srgn->mctx;
508
509         spin_lock_irqsave(&hpb->param_lock, flags);
510         hpb->num_inflight_map_req++;
511         spin_unlock_irqrestore(&hpb->param_lock, flags);
512
513         return map_req;
514 }
515
516 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
517                                struct ufshpb_req *map_req)
518 {
519         unsigned long flags;
520
521         bio_put(map_req->bio);
522         ufshpb_put_req(hpb, map_req);
523
524         spin_lock_irqsave(&hpb->param_lock, flags);
525         hpb->num_inflight_map_req--;
526         spin_unlock_irqrestore(&hpb->param_lock, flags);
527 }
528
529 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
530                                      struct ufshpb_subregion *srgn)
531 {
532         struct ufshpb_region *rgn;
533         u32 num_entries = hpb->entries_per_srgn;
534
535         if (!srgn->mctx) {
536                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
537                         "no mctx in region %d subregion %d.\n",
538                         srgn->rgn_idx, srgn->srgn_idx);
539                 return -1;
540         }
541
542         if (unlikely(srgn->is_last))
543                 num_entries = hpb->last_srgn_entries;
544
545         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
546
547         rgn = hpb->rgn_tbl + srgn->rgn_idx;
548         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
549
550         return 0;
551 }
552
553 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
554                                       int srgn_idx)
555 {
556         struct ufshpb_region *rgn;
557         struct ufshpb_subregion *srgn;
558
559         rgn = hpb->rgn_tbl + rgn_idx;
560         srgn = rgn->srgn_tbl + srgn_idx;
561
562         list_del_init(&rgn->list_inact_rgn);
563
564         if (list_empty(&srgn->list_act_srgn))
565                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
566
567         hpb->stats.rb_active_cnt++;
568 }
569
570 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
571 {
572         struct ufshpb_region *rgn;
573         struct ufshpb_subregion *srgn;
574         int srgn_idx;
575
576         rgn = hpb->rgn_tbl + rgn_idx;
577
578         for_each_sub_region(rgn, srgn_idx, srgn)
579                 list_del_init(&srgn->list_act_srgn);
580
581         if (list_empty(&rgn->list_inact_rgn))
582                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
583
584         hpb->stats.rb_inactive_cnt++;
585 }
586
587 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
588                                       struct ufshpb_subregion *srgn)
589 {
590         struct ufshpb_region *rgn;
591
592         /*
593          * If the subregion has no mctx after the HPB_READ_BUFFER I/O
594          * has completed, the region to which the subregion belongs was
595          * evicted.
596          * A region must not be evicted while its I/O is in progress.
597          */
598         if (!srgn->mctx) {
599                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
600                         "no mctx in region %d subregion %d.\n",
601                         srgn->rgn_idx, srgn->srgn_idx);
602                 srgn->srgn_state = HPB_SRGN_INVALID;
603                 return;
604         }
605
606         rgn = hpb->rgn_tbl + srgn->rgn_idx;
607
608         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
609                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
610                         "region %d subregion %d evicted\n",
611                         srgn->rgn_idx, srgn->srgn_idx);
612                 srgn->srgn_state = HPB_SRGN_INVALID;
613                 return;
614         }
615         srgn->srgn_state = HPB_SRGN_VALID;
616 }
617
618 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
619 {
620         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
621
622         ufshpb_put_req(umap_req->hpb, umap_req);
623 }
624
625 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
626 {
627         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
628         struct ufshpb_lu *hpb = map_req->hpb;
629         struct ufshpb_subregion *srgn;
630         unsigned long flags;
631
632         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
633                 map_req->rb.srgn_idx;
634
635         ufshpb_clear_dirty_bitmap(hpb, srgn);
636         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
637         ufshpb_activate_subregion(hpb, srgn);
638         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
639
640         ufshpb_put_map_req(map_req->hpb, map_req);
641 }
642
643 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
644 {
645         cdb[0] = UFSHPB_WRITE_BUFFER;
646         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
647                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
648         if (rgn)
649                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
650         cdb[9] = 0x00;
651 }
652
653 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
654                                     int srgn_idx, int srgn_mem_size)
655 {
656         cdb[0] = UFSHPB_READ_BUFFER;
657         cdb[1] = UFSHPB_READ_BUFFER_ID;
658
659         put_unaligned_be16(rgn_idx, &cdb[2]);
660         put_unaligned_be16(srgn_idx, &cdb[4]);
661         put_unaligned_be24(srgn_mem_size, &cdb[6]);
662
663         cdb[9] = 0x00;
664 }
665
666 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
667                                    struct ufshpb_req *umap_req,
668                                    struct ufshpb_region *rgn)
669 {
670         struct request *req;
671         struct scsi_request *rq;
672
673         req = umap_req->req;
674         req->timeout = 0;
675         req->end_io_data = (void *)umap_req;
676         rq = scsi_req(req);
677         ufshpb_set_unmap_cmd(rq->cmd, rgn);
678         rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
679
680         blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
681
682         hpb->stats.umap_req_cnt++;
683 }
684
685 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
686                                   struct ufshpb_req *map_req, bool last)
687 {
688         struct request_queue *q;
689         struct request *req;
690         struct scsi_request *rq;
691         int mem_size = hpb->srgn_mem_size;
692         int ret = 0;
693         int i;
694
695         q = hpb->sdev_ufs_lu->request_queue;
696         for (i = 0; i < hpb->pages_per_srgn; i++) {
697                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
698                                       PAGE_SIZE, 0);
699                 if (ret != PAGE_SIZE) {
700                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
701                                    "bio_add_pc_page fail %d - %d\n",
702                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
703                         return ret;
704                 }
705         }
706
707         req = map_req->req;
708
709         blk_rq_append_bio(req, map_req->bio);
710
711         req->end_io_data = map_req;
712
713         rq = scsi_req(req);
714
715         if (unlikely(last))
716                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
717
718         ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
719                                 map_req->rb.srgn_idx, mem_size);
720         rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
721
722         blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
723
724         hpb->stats.map_req_cnt++;
725         return 0;
726 }
727
728 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
729                                                  bool last)
730 {
731         struct ufshpb_map_ctx *mctx;
732         u32 num_entries = hpb->entries_per_srgn;
733         int i, j;
734
735         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
736         if (!mctx)
737                 return NULL;
738
739         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
740         if (!mctx->m_page)
741                 goto release_mctx;
742
743         if (unlikely(last))
744                 num_entries = hpb->last_srgn_entries;
745
746         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
747         if (!mctx->ppn_dirty)
748                 goto release_m_page;
749
750         for (i = 0; i < hpb->pages_per_srgn; i++) {
751                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
752                 if (!mctx->m_page[i]) {
753                         for (j = 0; j < i; j++)
754                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
755                         goto release_ppn_dirty;
756                 }
757                 clear_page(page_address(mctx->m_page[i]));
758         }
759
760         return mctx;
761
762 release_ppn_dirty:
763         bitmap_free(mctx->ppn_dirty);
764 release_m_page:
765         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
766 release_mctx:
767         mempool_free(mctx, ufshpb_mctx_pool);
768         return NULL;
769 }
770
771 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
772                                struct ufshpb_map_ctx *mctx)
773 {
774         int i;
775
776         for (i = 0; i < hpb->pages_per_srgn; i++)
777                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
778
779         bitmap_free(mctx->ppn_dirty);
780         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
781         mempool_free(mctx, ufshpb_mctx_pool);
782 }
783
784 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
785                                           struct ufshpb_region *rgn)
786 {
787         struct ufshpb_subregion *srgn;
788         int srgn_idx;
789
790         for_each_sub_region(rgn, srgn_idx, srgn)
791                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
792                         return -EPERM;
793
794         return 0;
795 }
796
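/*
 * Delayed work used in host control mode: scan the LRU regions and, for
 * each one whose read timer has expired, either rewind the timer or (if
 * the region is dirty or has run out of expiries) queue it for
 * inactivation. The work then kicks map_work and re-arms itself.
 */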
797 static void ufshpb_read_to_handler(struct work_struct *work)
798 {
799         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
800                                              ufshpb_read_to_work.work);
801         struct victim_select_info *lru_info = &hpb->lru_info;
802         struct ufshpb_region *rgn, *next_rgn;
803         unsigned long flags;
804         unsigned int poll;
805         LIST_HEAD(expired_list);
806
807         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
808                 return;
809
810         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
811
812         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
813                                  list_lru_rgn) {
814                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
815
816                 if (timedout) {
817                         rgn->read_timeout_expiries--;
818                         if (is_rgn_dirty(rgn) ||
819                             rgn->read_timeout_expiries == 0)
820                                 list_add(&rgn->list_expired_rgn, &expired_list);
821                         else
822                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
823                                                 hpb->params.read_timeout_ms);
824                 }
825         }
826
827         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
828
829         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
830                                  list_expired_rgn) {
831                 list_del_init(&rgn->list_expired_rgn);
832                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
833                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
834                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
835         }
836
837         ufshpb_kick_map_work(hpb);
838
839         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
840
841         poll = hpb->params.timeout_polling_interval_ms;
842         schedule_delayed_work(&hpb->ufshpb_read_to_work,
843                               msecs_to_jiffies(poll));
844 }
845
846 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
847                                 struct ufshpb_region *rgn)
848 {
849         rgn->rgn_state = HPB_RGN_ACTIVE;
850         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
851         atomic_inc(&lru_info->active_cnt);
852         if (rgn->hpb->is_hcm) {
853                 rgn->read_timeout =
854                         ktime_add_ms(ktime_get(),
855                                      rgn->hpb->params.read_timeout_ms);
856                 rgn->read_timeout_expiries =
857                         rgn->hpb->params.read_timeout_expiries;
858         }
859 }
860
861 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
862                                 struct ufshpb_region *rgn)
863 {
864         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
865 }
866
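/*
 * Pick an eviction victim from the LRU list, skipping regions that still
 * have map requests in flight and, in host control mode, regions whose
 * read count is still above the eviction exit threshold.
 */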
867 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
868 {
869         struct victim_select_info *lru_info = &hpb->lru_info;
870         struct ufshpb_region *rgn, *victim_rgn = NULL;
871
872         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
873                 if (!rgn) {
874                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
875                                 "%s: no region allocated\n",
876                                 __func__);
877                         return NULL;
878                 }
879                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
880                         continue;
881
882                 /*
883                  * in host control mode, only evict a region whose read
884                  * count has dropped below the eviction exit threshold
885                  */
886                 if (hpb->is_hcm &&
887                     rgn->reads > hpb->params.eviction_thld_exit)
888                         continue;
889
890                 victim_rgn = rgn;
891                 break;
892         }
893
894         return victim_rgn;
895 }
896
897 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
898                                     struct ufshpb_region *rgn)
899 {
900         list_del_init(&rgn->list_lru_rgn);
901         rgn->rgn_state = HPB_RGN_INACTIVE;
902         atomic_dec(&lru_info->active_cnt);
903 }
904
905 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
906                                           struct ufshpb_subregion *srgn)
907 {
908         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
909                 ufshpb_put_map_ctx(hpb, srgn->mctx);
910                 srgn->srgn_state = HPB_SRGN_UNUSED;
911                 srgn->mctx = NULL;
912         }
913 }
914
915 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
916                                  struct ufshpb_region *rgn,
917                                  bool atomic)
918 {
919         struct ufshpb_req *umap_req;
920         int rgn_idx = rgn ? rgn->rgn_idx : 0;
921
922         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
923         if (!umap_req)
924                 return -ENOMEM;
925
926         ufshpb_execute_umap_req(hpb, umap_req, rgn);
927
928         return 0;
929 }
930
931 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
932                                         struct ufshpb_region *rgn)
933 {
934         return ufshpb_issue_umap_req(hpb, rgn, true);
935 }
936
937 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
938 {
939         return ufshpb_issue_umap_req(hpb, NULL, false);
940 }
941
942 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
943                                  struct ufshpb_region *rgn)
944 {
945         struct victim_select_info *lru_info;
946         struct ufshpb_subregion *srgn;
947         int srgn_idx;
948
949         lru_info = &hpb->lru_info;
950
951         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
952
953         ufshpb_cleanup_lru_info(lru_info, rgn);
954
955         for_each_sub_region(rgn, srgn_idx, srgn)
956                 ufshpb_purge_active_subregion(hpb, srgn);
957 }
958
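/*
 * Evict a single active region. Pinned regions are never evicted, and in
 * host control mode an HPB WRITE BUFFER (inactivate) request is sent to
 * the device before the region is dropped from the LRU list.
 */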
959 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
960 {
961         unsigned long flags;
962         int ret = 0;
963
964         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
965         if (rgn->rgn_state == HPB_RGN_PINNED) {
966                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
967                          "pinned region cannot drop-out. region %d\n",
968                          rgn->rgn_idx);
969                 goto out;
970         }
971
972         if (!list_empty(&rgn->list_lru_rgn)) {
973                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
974                         ret = -EBUSY;
975                         goto out;
976                 }
977
978                 if (hpb->is_hcm) {
979                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
980                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
981                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
982                         if (ret)
983                                 goto out;
984                 }
985
986                 __ufshpb_evict_region(hpb, rgn);
987         }
988 out:
989         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
990         return ret;
991 }
992
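/*
 * Load the L2P entries of one subregion: allocate a map context if the
 * subregion is unused, mark it ISSUED and send an HPB READ BUFFER
 * request. On failure the subregion is set back to the UNUSED or
 * INVALID state.
 */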
993 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
994                                 struct ufshpb_region *rgn,
995                                 struct ufshpb_subregion *srgn)
996 {
997         struct ufshpb_req *map_req;
998         unsigned long flags;
999         int ret;
1000         int err = -EAGAIN;
1001         bool alloc_required = false;
1002         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1003
1004         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1005
1006         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1007                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1008                            "%s: ufshpb state is not PRESENT\n", __func__);
1009                 goto unlock_out;
1010         }
1011
1012         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1013             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1014                 err = 0;
1015                 goto unlock_out;
1016         }
1017
1018         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1019                 alloc_required = true;
1020
1021         /*
1022          * If the subregion is already in the ISSUED state, a specific
1023          * event (e.g. GC or wear-leveling) occurred in the device and an
1024          * HPB response requesting a map load was received.
1025          * In this case, after the current HPB_READ_BUFFER finishes,
1026          * another HPB_READ_BUFFER is issued to obtain the latest
1027          * map data.
1028          */
1029         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1030                 goto unlock_out;
1031
1032         srgn->srgn_state = HPB_SRGN_ISSUED;
1033         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1034
1035         if (alloc_required) {
1036                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1037                 if (!srgn->mctx) {
1038                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1039                             "get map_ctx failed. region %d - %d\n",
1040                             rgn->rgn_idx, srgn->srgn_idx);
1041                         state = HPB_SRGN_UNUSED;
1042                         goto change_srgn_state;
1043                 }
1044         }
1045
1046         map_req = ufshpb_get_map_req(hpb, srgn);
1047         if (!map_req)
1048                 goto change_srgn_state;
1049
1050
1051         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1052         if (ret) {
1053                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1054                            "%s: issue map_req failed: %d, region %d - %d\n",
1055                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1056                 goto free_map_req;
1057         }
1058         return 0;
1059
1060 free_map_req:
1061         ufshpb_put_map_req(hpb, map_req);
1062 change_srgn_state:
1063         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1064         srgn->srgn_state = state;
1065 unlock_out:
1066         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1067         return err;
1068 }
1069
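/*
 * Make sure the region is on the active LRU list. If the region is
 * inactive and the LRU list is full, an LRU victim is evicted first; in
 * host control mode this only happens when the entering region has
 * enough reads, and the device is told to inactivate the victim before
 * it is dropped.
 */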
1070 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1071 {
1072         struct ufshpb_region *victim_rgn = NULL;
1073         struct victim_select_info *lru_info = &hpb->lru_info;
1074         unsigned long flags;
1075         int ret = 0;
1076
1077         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1078         /*
1079          * If the region already belongs to the LRU list, just move it to
1080          * the MRU (tail) end of the list, because the region is already
1081          * in the active state.
1082          */
1083         if (!list_empty(&rgn->list_lru_rgn)) {
1084                 ufshpb_hit_lru_info(lru_info, rgn);
1085                 goto out;
1086         }
1087
1088         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1089                 if (atomic_read(&lru_info->active_cnt) ==
1090                     lru_info->max_lru_active_cnt) {
1091                         /*
1092                          * If the maximum number of active regions has been
1093                          * reached, evict the least recently used region.
1094                          * This case may occur when the device responds to
1095                          * the eviction information late.
1096                          * It is okay to evict the least recently used region,
1097                          * because the host stops issuing HPB_READ for it and
1098                          * the device can therefore detect that it is inactive.
1099                          *
1100                          * In host control mode, also verify that the entering
1101                          * region has enough reads to be worth the eviction.
1102                          */
1103                         if (hpb->is_hcm &&
1104                             rgn->reads < hpb->params.eviction_thld_enter) {
1105                                 ret = -EACCES;
1106                                 goto out;
1107                         }
1108
1109                         victim_rgn = ufshpb_victim_lru_info(hpb);
1110                         if (!victim_rgn) {
1111                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1112                                     "cannot get victim region %s\n",
1113                                     hpb->is_hcm ? "" : "error");
1114                                 ret = -ENOMEM;
1115                                 goto out;
1116                         }
1117
1118                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1119                                 "LRU full (%d), choose victim %d\n",
1120                                 atomic_read(&lru_info->active_cnt),
1121                                 victim_rgn->rgn_idx);
1122
1123                         if (hpb->is_hcm) {
1124                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1125                                                        flags);
1126                                 ret = ufshpb_issue_umap_single_req(hpb,
1127                                                                 victim_rgn);
1128                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1129                                                   flags);
1130                                 if (ret)
1131                                         goto out;
1132                         }
1133
1134                         __ufshpb_evict_region(hpb, victim_rgn);
1135                 }
1136
1137                 /*
1138                  * When a region is added to the lru_info list, it is
1139                  * guaranteed that its subregions have been assigned all
1140                  * mctx. If that failed, try to receive the mctx again
1141                  * without being added to the lru_info list.
1142                  */
1143                 ufshpb_add_lru_info(lru_info, rgn);
1144         }
1145 out:
1146         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1147         return ret;
1148 }
1149
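/*
 * Apply the device's recommendations from an HPB response: queue the
 * recommended subregions for (re)activation and, in device control mode,
 * queue the recommended regions for inactivation, then schedule map_work
 * to carry them out.
 */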
1150 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1151                                          struct utp_hpb_rsp *rsp_field)
1152 {
1153         struct ufshpb_region *rgn;
1154         struct ufshpb_subregion *srgn;
1155         int i, rgn_i, srgn_i;
1156
1157         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1158         /*
1159          * If the active region and the inactive region are the same,
1160          * we will inactivate this region.
1161          * The device can check this (the region was inactivated) and
1162          * will respond with the proper active region information.
1163          */
1164         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1165                 rgn_i =
1166                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1167                 srgn_i =
1168                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1169
1170                 rgn = hpb->rgn_tbl + rgn_i;
1171                 if (hpb->is_hcm &&
1172                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1173                         /*
1174                          * in host control mode, subregion activation
1175                          * recommendations are only allowed for active regions.
1176                          * Also, ignore recommendations for dirty regions - the
1177                          * host will make decisions concerning those by itself.
1178                          */
1179                         continue;
1180                 }
1181
1182                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1183                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1184
1185                 spin_lock(&hpb->rsp_list_lock);
1186                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1187                 spin_unlock(&hpb->rsp_list_lock);
1188
1189                 srgn = rgn->srgn_tbl + srgn_i;
1190
1191                 /* blocking HPB_READ */
1192                 spin_lock(&hpb->rgn_state_lock);
1193                 if (srgn->srgn_state == HPB_SRGN_VALID)
1194                         srgn->srgn_state = HPB_SRGN_INVALID;
1195                 spin_unlock(&hpb->rgn_state_lock);
1196         }
1197
1198         if (hpb->is_hcm) {
1199                 /*
1200                  * in host control mode the device is not allowed to inactivate
1201                  * regions
1202                  */
1203                 goto out;
1204         }
1205
1206         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1207                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1208                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1209                         "inactivate(%d) region %d\n", i, rgn_i);
1210
1211                 spin_lock(&hpb->rsp_list_lock);
1212                 ufshpb_update_inactive_info(hpb, rgn_i);
1213                 spin_unlock(&hpb->rsp_list_lock);
1214
1215                 rgn = hpb->rgn_tbl + rgn_i;
1216
1217                 spin_lock(&hpb->rgn_state_lock);
1218                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1219                         for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1220                                 srgn = rgn->srgn_tbl + srgn_i;
1221                                 if (srgn->srgn_state == HPB_SRGN_VALID)
1222                                         srgn->srgn_state = HPB_SRGN_INVALID;
1223                         }
1224                 }
1225                 spin_unlock(&hpb->rgn_state_lock);
1226
1227         }
1228
1229 out:
1230         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1231                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1232
1233         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1234                 queue_work(ufshpb_wq, &hpb->map_work);
1235 }
1236
1237 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1238 {
1239         struct victim_select_info *lru_info = &hpb->lru_info;
1240         struct ufshpb_region *rgn;
1241         unsigned long flags;
1242
1243         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1244
1245         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1246                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1247
1248         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1249 }
1250
1251 /*
1252  * This function parses the recommended active subregion information in the
1253  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1254  */
1255 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1256 {
1257         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1258         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1259         int data_seg_len;
1260
1261         if (unlikely(lrbp->lun != rsp_field->lun)) {
1262                 struct scsi_device *sdev;
1263                 bool found = false;
1264
1265                 __shost_for_each_device(sdev, hba->host) {
1266                         hpb = ufshpb_get_hpb_data(sdev);
1267
1268                         if (!hpb)
1269                                 continue;
1270
1271                         if (rsp_field->lun == hpb->lun) {
1272                                 found = true;
1273                                 break;
1274                         }
1275                 }
1276
1277                 if (!found)
1278                         return;
1279         }
1280
1281         if (!hpb)
1282                 return;
1283
1284         if (ufshpb_get_state(hpb) == HPB_INIT)
1285                 return;
1286
1287         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1288             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1289                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1290                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1291                            __func__);
1292                 return;
1293         }
1294
1295         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1296                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1297
1298         /* To flush the remaining rsp_list, queue the map_work task */
1299         if (!data_seg_len) {
1300                 if (!ufshpb_is_general_lun(hpb->lun))
1301                         return;
1302
1303                 ufshpb_kick_map_work(hpb);
1304                 return;
1305         }
1306
1307         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1308
1309         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1310                 return;
1311
1312         hpb->stats.rb_noti_cnt++;
1313
1314         switch (rsp_field->hpb_op) {
1315         case HPB_RSP_REQ_REGION_UPDATE:
1316                 if (data_seg_len != DEV_DATA_SEG_LEN)
1317                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1318                                  "%s: data seg length is not same.\n",
1319                                  __func__);
1320                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1321                 break;
1322         case HPB_RSP_DEV_RESET:
1323                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1324                          "UFS device lost HPB information during PM.\n");
1325
1326                 if (hpb->is_hcm) {
1327                         struct scsi_device *sdev;
1328
1329                         __shost_for_each_device(sdev, hba->host) {
1330                                 struct ufshpb_lu *h = sdev->hostdata;
1331
1332                                 if (h)
1333                                         ufshpb_dev_reset_handler(h);
1334                         }
1335                 }
1336
1337                 break;
1338         default:
1339                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1340                            "hpb_op is not available: %d\n",
1341                            rsp_field->hpb_op);
1342                 break;
1343         }
1344 }
1345
1346 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1347                                    struct ufshpb_region *rgn,
1348                                    struct ufshpb_subregion *srgn)
1349 {
1350         if (!list_empty(&rgn->list_inact_rgn))
1351                 return;
1352
1353         if (!list_empty(&srgn->list_act_srgn)) {
1354                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1355                 return;
1356         }
1357
1358         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1359 }
1360
1361 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1362                                           struct ufshpb_region *rgn,
1363                                           struct list_head *pending_list)
1364 {
1365         struct ufshpb_subregion *srgn;
1366         int srgn_idx;
1367
1368         if (!list_empty(&rgn->list_inact_rgn))
1369                 return;
1370
1371         for_each_sub_region(rgn, srgn_idx, srgn)
1372                 if (!list_empty(&srgn->list_act_srgn))
1373                         return;
1374
1375         list_add_tail(&rgn->list_inact_rgn, pending_list);
1376 }
1377
1378 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1379 {
1380         struct ufshpb_region *rgn;
1381         struct ufshpb_subregion *srgn;
1382         unsigned long flags;
1383         int ret = 0;
1384
1385         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1386         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1387                                                 struct ufshpb_subregion,
1388                                                 list_act_srgn))) {
1389                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1390                         break;
1391
1392                 list_del_init(&srgn->list_act_srgn);
1393                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1394
1395                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1396                 ret = ufshpb_add_region(hpb, rgn);
1397                 if (ret)
1398                         goto active_failed;
1399
1400                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1401                 if (ret) {
1402                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1403                             "issue map_req failed. ret %d, region %d - %d\n",
1404                             ret, rgn->rgn_idx, srgn->srgn_idx);
1405                         goto active_failed;
1406                 }
1407                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1408         }
1409         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1410         return;
1411
1412 active_failed:
1413         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1414                    rgn->rgn_idx, srgn->srgn_idx);
1415         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1416         ufshpb_add_active_list(hpb, rgn, srgn);
1417         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1418 }
1419
1420 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1421 {
1422         struct ufshpb_region *rgn;
1423         unsigned long flags;
1424         int ret;
1425         LIST_HEAD(pending_list);
1426
1427         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1428         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1429                                                struct ufshpb_region,
1430                                                list_inact_rgn))) {
1431                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1432                         break;
1433
1434                 list_del_init(&rgn->list_inact_rgn);
1435                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1436
1437                 ret = ufshpb_evict_region(hpb, rgn);
1438                 if (ret) {
1439                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1440                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1441                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1442                 }
1443
1444                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1445         }
1446
1447         list_splice(&pending_list, &hpb->lh_inact_rgn);
1448         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1449 }
1450
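/*
 * Host control mode: decay the per-subregion read counters by the
 * normalization factor and queue for inactivation any active region that
 * is left with no reads.
 */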
1451 static void ufshpb_normalization_work_handler(struct work_struct *work)
1452 {
1453         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1454                                              ufshpb_normalization_work);
1455         int rgn_idx;
1456         u8 factor = hpb->params.normalization_factor;
1457
1458         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1459                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1460                 int srgn_idx;
1461
1462                 spin_lock(&rgn->rgn_lock);
1463                 rgn->reads = 0;
1464                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1465                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1466
1467                         srgn->reads >>= factor;
1468                         rgn->reads += srgn->reads;
1469                 }
1470                 spin_unlock(&rgn->rgn_lock);
1471
1472                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1473                         continue;
1474
1475                 /* if region is active but has no reads - inactivate it */
1476                 spin_lock(&hpb->rsp_list_lock);
1477                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1478                 spin_unlock(&hpb->rsp_list_lock);
1479         }
1480 }
1481
1482 static void ufshpb_map_work_handler(struct work_struct *work)
1483 {
1484         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1485
1486         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1487                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1488                            "%s: ufshpb state is not PRESENT\n", __func__);
1489                 return;
1490         }
1491
1492         ufshpb_run_inactive_region_list(hpb);
1493         ufshpb_run_active_subregion_list(hpb);
1494 }
1495
1496 /*
1497  * This function does not need to take any locks (rgn_state_lock,
1498  * rsp_list_lock, etc.) because it is only called during initialization.
1499  */
1500 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1501                                             struct ufshpb_lu *hpb,
1502                                             struct ufshpb_region *rgn)
1503 {
1504         struct ufshpb_subregion *srgn;
1505         int srgn_idx, i;
1506         int err = 0;
1507
1508         for_each_sub_region(rgn, srgn_idx, srgn) {
1509                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1510                 srgn->srgn_state = HPB_SRGN_INVALID;
1511                 if (!srgn->mctx) {
1512                         err = -ENOMEM;
1513                         dev_err(hba->dev,
1514                                 "alloc mctx for pinned region failed\n");
1515                         goto release;
1516                 }
1517
1518                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1519         }
1520
1521         rgn->rgn_state = HPB_RGN_PINNED;
1522         return 0;
1523
1524 release:
1525         for (i = 0; i < srgn_idx; i++) {
1526                 srgn = rgn->srgn_tbl + i;
1527                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1528         }
1529         return err;
1530 }
1531
1532 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1533                                       struct ufshpb_region *rgn, bool last)
1534 {
1535         int srgn_idx;
1536         struct ufshpb_subregion *srgn;
1537
1538         for_each_sub_region(rgn, srgn_idx, srgn) {
1539                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1540
1541                 srgn->rgn_idx = rgn->rgn_idx;
1542                 srgn->srgn_idx = srgn_idx;
1543                 srgn->srgn_state = HPB_SRGN_UNUSED;
1544         }
1545
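        /*
         * The loop macro above leaves srgn pointing at this region's last
         * subregion, so flag it when the LU ends in a partial subregion.
         */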
1546         if (unlikely(last && hpb->last_srgn_entries))
1547                 srgn->is_last = true;
1548 }
1549
1550 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1551                                       struct ufshpb_region *rgn, int srgn_cnt)
1552 {
1553         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1554                                  GFP_KERNEL);
1555         if (!rgn->srgn_tbl)
1556                 return -ENOMEM;
1557
1558         rgn->srgn_cnt = srgn_cnt;
1559         return 0;
1560 }
1561
1562 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1563                                      struct ufshpb_lu *hpb,
1564                                      struct ufshpb_dev_info *hpb_dev_info,
1565                                      struct ufshpb_lu_info *hpb_lu_info)
1566 {
1567         u32 entries_per_rgn;
1568         u64 rgn_mem_size, tmp;
1569
1570         if (ufshpb_is_legacy(hba))
1571                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1572         else
1573                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1574
1575         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1576         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1577                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1578                 : PINNED_NOT_SET;
1579         hpb->lru_info.max_lru_active_cnt =
1580                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1581
1582         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1583                         * HPB_ENTRY_SIZE;
1584         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1585         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1586                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1587
1588         tmp = rgn_mem_size;
1589         do_div(tmp, HPB_ENTRY_SIZE);
1590         entries_per_rgn = (u32)tmp;
1591         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1592         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1593
1594         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1595         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1596         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1597
1598         tmp = rgn_mem_size;
1599         do_div(tmp, hpb->srgn_mem_size);
1600         hpb->srgns_per_rgn = (int)tmp;
1601
1602         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1603                                 entries_per_rgn);
1604         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1605                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1606         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1607                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1608
1609         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1610
1611         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1612                 hpb->is_hcm = true;
1613 }
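
/*
 * Worked example of the sizing math above, assuming the usual ufshpb.h
 * constants (HPB_RGN_SIZE_UNIT = 512 bytes, HPB_ENTRY_BLOCK_SIZE = 4 KiB,
 * HPB_ENTRY_SIZE = 8 bytes): a bHPBRegionSize of 0x0F describes a
 * (1 << 15) * 512 B = 16 MiB region, which needs 16 MiB / 4 KiB * 8 B =
 * 32 KiB of map memory per region, i.e. 4096 HPB entries per region.
 */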
1614
1615 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1616 {
1617         struct ufshpb_region *rgn_table, *rgn;
1618         int rgn_idx, i;
1619         int ret = 0;
1620
1621         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1622                             GFP_KERNEL);
1623         if (!rgn_table)
1624                 return -ENOMEM;
1625
1626         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1627                 int srgn_cnt = hpb->srgns_per_rgn;
1628                 bool last_srgn = false;
1629
1630                 rgn = rgn_table + rgn_idx;
1631                 rgn->rgn_idx = rgn_idx;
1632
1633                 spin_lock_init(&rgn->rgn_lock);
1634
1635                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1636                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1637                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1638
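                /*
                 * The last region may hold fewer subregions; e.g. with
                 * srgns_per_lu = 10 and srgns_per_rgn = 4 it gets
                 * ((10 - 1) % 4) + 1 = 2 subregions.
                 */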
1639                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1640                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1641                                     hpb->srgns_per_rgn) + 1;
1642                         last_srgn = true;
1643                 }
1644
1645                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1646                 if (ret)
1647                         goto release_srgn_table;
1648                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1649
1650                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1651                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1652                         if (ret)
1653                                 goto release_srgn_table;
1654                 } else {
1655                         rgn->rgn_state = HPB_RGN_INACTIVE;
1656                 }
1657
1658                 rgn->rgn_flags = 0;
1659                 rgn->hpb = hpb;
1660         }
1661
1662         hpb->rgn_tbl = rgn_table;
1663
1664         return 0;
1665
1666 release_srgn_table:
1667         for (i = 0; i <= rgn_idx; i++)
1668                 kvfree(rgn_table[i].srgn_tbl);
1669
1670         kvfree(rgn_table);
1671         return ret;
1672 }
1673
1674 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1675                                          struct ufshpb_region *rgn)
1676 {
1677         int srgn_idx;
1678         struct ufshpb_subregion *srgn;
1679
1680         for_each_sub_region(rgn, srgn_idx, srgn)
1681                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1682                         srgn->srgn_state = HPB_SRGN_UNUSED;
1683                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1684                 }
1685 }
1686
1687 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1688 {
1689         int rgn_idx;
1690
1691         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1692                 struct ufshpb_region *rgn;
1693
1694                 rgn = hpb->rgn_tbl + rgn_idx;
1695                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1696                         rgn->rgn_state = HPB_RGN_INACTIVE;
1697
1698                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1699                 }
1700
1701                 kvfree(rgn->srgn_tbl);
1702         }
1703
1704         kvfree(hpb->rgn_tbl);
1705 }
1706
1707 /* SYSFS functions for HPB statistics */
1708 #define ufshpb_sysfs_attr_show_func(__name)                             \
1709 static ssize_t __name##_show(struct device *dev,                        \
1710         struct device_attribute *attr, char *buf)                       \
1711 {                                                                       \
1712         struct scsi_device *sdev = to_scsi_device(dev);                 \
1713         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1714                                                                         \
1715         if (!hpb)                                                       \
1716                 return -ENODEV;                                         \
1717                                                                         \
1718         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1719 }                                                                       \
1720 \
1721 static DEVICE_ATTR_RO(__name)
1722
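/*
 * Each invocation below expands to a <name>_show() helper that prints
 * hpb->stats.<name> and declares the matching read-only dev_attr_<name>
 * used in hpb_dev_stat_attrs[].
 */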
1723 ufshpb_sysfs_attr_show_func(hit_cnt);
1724 ufshpb_sysfs_attr_show_func(miss_cnt);
1725 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
1726 ufshpb_sysfs_attr_show_func(rb_active_cnt);
1727 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
1728 ufshpb_sysfs_attr_show_func(map_req_cnt);
1729 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1730
1731 static struct attribute *hpb_dev_stat_attrs[] = {
1732         &dev_attr_hit_cnt.attr,
1733         &dev_attr_miss_cnt.attr,
1734         &dev_attr_rb_noti_cnt.attr,
1735         &dev_attr_rb_active_cnt.attr,
1736         &dev_attr_rb_inactive_cnt.attr,
1737         &dev_attr_map_req_cnt.attr,
1738         &dev_attr_umap_req_cnt.attr,
1739         NULL,
1740 };
1741
1742 struct attribute_group ufs_sysfs_hpb_stat_group = {
1743         .name = "hpb_stats",
1744         .attrs = hpb_dev_stat_attrs,
1745 };
1746
1747 /* SYSFS functions for HPB parameters */
1748 #define ufshpb_sysfs_param_show_func(__name)                            \
1749 static ssize_t __name##_show(struct device *dev,                        \
1750         struct device_attribute *attr, char *buf)                       \
1751 {                                                                       \
1752         struct scsi_device *sdev = to_scsi_device(dev);                 \
1753         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1754                                                                         \
1755         if (!hpb)                                                       \
1756                 return -ENODEV;                                         \
1757                                                                         \
1758         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1759 }
1760
1761 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1762 static ssize_t
1763 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1764                          const char *buf, size_t count)
1765 {
1766         struct scsi_device *sdev = to_scsi_device(dev);
1767         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1768         int val;
1769
1770         if (!hpb)
1771                 return -ENODEV;
1772
1773         if (kstrtouint(buf, 0, &val))
1774                 return -EINVAL;
1775
1776         if (val < 0)
1777                 return -EINVAL;
1778
1779         hpb->params.requeue_timeout_ms = val;
1780
1781         return count;
1782 }
1783 static DEVICE_ATTR_RW(requeue_timeout_ms);
1784
1785 ufshpb_sysfs_param_show_func(activation_thld);
1786 static ssize_t
1787 activation_thld_store(struct device *dev, struct device_attribute *attr,
1788                       const char *buf, size_t count)
1789 {
1790         struct scsi_device *sdev = to_scsi_device(dev);
1791         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1792         int val;
1793
1794         if (!hpb)
1795                 return -ENODEV;
1796
1797         if (!hpb->is_hcm)
1798                 return -EOPNOTSUPP;
1799
1800         if (kstrtouint(buf, 0, &val))
1801                 return -EINVAL;
1802
1803         if (val <= 0)
1804                 return -EINVAL;
1805
1806         hpb->params.activation_thld = val;
1807
1808         return count;
1809 }
1810 static DEVICE_ATTR_RW(activation_thld);
1811
1812 ufshpb_sysfs_param_show_func(normalization_factor);
1813 static ssize_t
1814 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1815                            const char *buf, size_t count)
1816 {
1817         struct scsi_device *sdev = to_scsi_device(dev);
1818         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1819         int val;
1820
1821         if (!hpb)
1822                 return -ENODEV;
1823
1824         if (!hpb->is_hcm)
1825                 return -EOPNOTSUPP;
1826
1827         if (kstrtouint(buf, 0, &val))
1828                 return -EINVAL;
1829
1830         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1831                 return -EINVAL;
1832
1833         hpb->params.normalization_factor = val;
1834
1835         return count;
1836 }
1837 static DEVICE_ATTR_RW(normalization_factor);
1838
1839 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1840 static ssize_t
1841 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1842                           const char *buf, size_t count)
1843 {
1844         struct scsi_device *sdev = to_scsi_device(dev);
1845         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1846         int val;
1847
1848         if (!hpb)
1849                 return -ENODEV;
1850
1851         if (!hpb->is_hcm)
1852                 return -EOPNOTSUPP;
1853
1854         if (kstrtouint(buf, 0, &val))
1855                 return -EINVAL;
1856
1857         if (val <= hpb->params.eviction_thld_exit)
1858                 return -EINVAL;
1859
1860         hpb->params.eviction_thld_enter = val;
1861
1862         return count;
1863 }
1864 static DEVICE_ATTR_RW(eviction_thld_enter);
1865
1866 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1867 static ssize_t
1868 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1869                          const char *buf, size_t count)
1870 {
1871         struct scsi_device *sdev = to_scsi_device(dev);
1872         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1873         int val;
1874
1875         if (!hpb)
1876                 return -ENODEV;
1877
1878         if (!hpb->is_hcm)
1879                 return -EOPNOTSUPP;
1880
1881         if (kstrtouint(buf, 0, &val))
1882                 return -EINVAL;
1883
1884         if (val <= hpb->params.activation_thld)
1885                 return -EINVAL;
1886
1887         hpb->params.eviction_thld_exit = val;
1888
1889         return count;
1890 }
1891 static DEVICE_ATTR_RW(eviction_thld_exit);
1892
1893 ufshpb_sysfs_param_show_func(read_timeout_ms);
1894 static ssize_t
1895 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1896                       const char *buf, size_t count)
1897 {
1898         struct scsi_device *sdev = to_scsi_device(dev);
1899         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1900         int val;
1901
1902         if (!hpb)
1903                 return -ENODEV;
1904
1905         if (!hpb->is_hcm)
1906                 return -EOPNOTSUPP;
1907
1908         if (kstrtouint(buf, 0, &val))
1909                 return -EINVAL;
1910
1911         /* read_timeout must be much greater than timeout_polling_interval */
1912         if (val < hpb->params.timeout_polling_interval_ms * 2)
1913                 return -EINVAL;
1914
1915         hpb->params.read_timeout_ms = val;
1916
1917         return count;
1918 }
1919 static DEVICE_ATTR_RW(read_timeout_ms);
1920
1921 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1922 static ssize_t
1923 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1924                             const char *buf, size_t count)
1925 {
1926         struct scsi_device *sdev = to_scsi_device(dev);
1927         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1928         int val;
1929
1930         if (!hpb)
1931                 return -ENODEV;
1932
1933         if (!hpb->is_hcm)
1934                 return -EOPNOTSUPP;
1935
1936         if (kstrtouint(buf, 0, &val))
1937                 return -EINVAL;
1938
1939         if (val <= 0)
1940                 return -EINVAL;
1941
1942         hpb->params.read_timeout_expiries = val;
1943
1944         return count;
1945 }
1946 static DEVICE_ATTR_RW(read_timeout_expiries);
1947
1948 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1949 static ssize_t
1950 timeout_polling_interval_ms_store(struct device *dev,
1951                                   struct device_attribute *attr,
1952                                   const char *buf, size_t count)
1953 {
1954         struct scsi_device *sdev = to_scsi_device(dev);
1955         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1956         int val;
1957
1958         if (!hpb)
1959                 return -ENODEV;
1960
1961         if (!hpb->is_hcm)
1962                 return -EOPNOTSUPP;
1963
1964         if (kstrtouint(buf, 0, &val))
1965                 return -EINVAL;
1966
1967         /* timeout_polling_interval must be much smaller than read_timeout */
1968         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
1969                 return -EINVAL;
1970
1971         hpb->params.timeout_polling_interval_ms = val;
1972
1973         return count;
1974 }
1975 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
1976
1977 ufshpb_sysfs_param_show_func(inflight_map_req);
1978 static ssize_t inflight_map_req_store(struct device *dev,
1979                                       struct device_attribute *attr,
1980                                       const char *buf, size_t count)
1981 {
1982         struct scsi_device *sdev = to_scsi_device(dev);
1983         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1984         int val;
1985
1986         if (!hpb)
1987                 return -ENODEV;
1988
1989         if (!hpb->is_hcm)
1990                 return -EOPNOTSUPP;
1991
1992         if (kstrtouint(buf, 0, &val))
1993                 return -EINVAL;
1994
1995         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
1996                 return -EINVAL;
1997
1998         hpb->params.inflight_map_req = val;
1999
2000         return count;
2001 }
2002 static DEVICE_ATTR_RW(inflight_map_req);
2003
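/*
 * Host-control-mode defaults: with an ACTIVATION_THRESHOLD of 8 reads this
 * gives eviction_thld_enter = 8 << 5 = 256 and eviction_thld_exit =
 * 8 << 4 = 128 normalized reads, a 1000 ms read timeout polled every 200 ms
 * with up to 100 expiries, and a single in-flight map request.
 */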
2004 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2005 {
2006         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2007         hpb->params.normalization_factor = 1;
2008         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2009         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2010         hpb->params.read_timeout_ms = READ_TO_MS;
2011         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2012         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2013         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2014 }
2015
2016 static struct attribute *hpb_dev_param_attrs[] = {
2017         &dev_attr_requeue_timeout_ms.attr,
2018         &dev_attr_activation_thld.attr,
2019         &dev_attr_normalization_factor.attr,
2020         &dev_attr_eviction_thld_enter.attr,
2021         &dev_attr_eviction_thld_exit.attr,
2022         &dev_attr_read_timeout_ms.attr,
2023         &dev_attr_read_timeout_expiries.attr,
2024         &dev_attr_timeout_polling_interval_ms.attr,
2025         &dev_attr_inflight_map_req.attr,
2026         NULL,
2027 };
2028
2029 struct attribute_group ufs_sysfs_hpb_param_group = {
2030         .name = "hpb_params",
2031         .attrs = hpb_dev_param_attrs,
2032 };
2033
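/*
 * Reserve half of the LU queue depth as pre-request slots: each slot gets a
 * single-vector bio and a zeroed write-buffer page, and free slots are kept
 * on hpb->lh_pre_req_free.
 */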
2034 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2035 {
2036         struct ufshpb_req *pre_req = NULL, *t;
2037         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2038         int i;
2039
2040         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2041
2042         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2043         hpb->throttle_pre_req = qd;
2044         hpb->num_inflight_pre_req = 0;
2045
2046         if (!hpb->pre_req)
2047                 goto release_mem;
2048
2049         for (i = 0; i < qd; i++) {
2050                 pre_req = hpb->pre_req + i;
2051                 INIT_LIST_HEAD(&pre_req->list_req);
2052                 pre_req->req = NULL;
2053
2054                 pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2055                 if (!pre_req->bio)
2056                         goto release_mem;
2057
2058                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2059                 if (!pre_req->wb.m_page) {
2060                         bio_put(pre_req->bio);
2061                         goto release_mem;
2062                 }
2063
2064                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2065         }
2066
2067         return 0;
2068 release_mem:
2069         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2070                 list_del_init(&pre_req->list_req);
2071                 bio_put(pre_req->bio);
2072                 __free_page(pre_req->wb.m_page);
2073         }
2074
2075         kfree(hpb->pre_req);
2076         return -ENOMEM;
2077 }
2078
2079 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2080 {
2081         struct ufshpb_req *pre_req = NULL;
2082         int i;
2083
2084         for (i = 0; i < hpb->throttle_pre_req; i++) {
2085                 pre_req = hpb->pre_req + i;
2086                 bio_put(hpb->pre_req[i].bio);
2087                 if (pre_req->wb.m_page)
2088                         __free_page(hpb->pre_req[i].wb.m_page);
2089                 list_del_init(&pre_req->list_req);
2090         }
2091
2092         kfree(hpb->pre_req);
2093 }
2094
2095 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2096 {
2097         hpb->stats.hit_cnt = 0;
2098         hpb->stats.miss_cnt = 0;
2099         hpb->stats.rb_noti_cnt = 0;
2100         hpb->stats.rb_active_cnt = 0;
2101         hpb->stats.rb_inactive_cnt = 0;
2102         hpb->stats.map_req_cnt = 0;
2103         hpb->stats.umap_req_cnt = 0;
2104 }
2105
2106 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2107 {
2108         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2109         if (hpb->is_hcm)
2110                 ufshpb_hcm_param_init(hpb);
2111 }
2112
2113 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2114 {
2115         int ret;
2116
2117         spin_lock_init(&hpb->rgn_state_lock);
2118         spin_lock_init(&hpb->rsp_list_lock);
2119         spin_lock_init(&hpb->param_lock);
2120
2121         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2122         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2123         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2124         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2125
2126         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2127         if (hpb->is_hcm) {
2128                 INIT_WORK(&hpb->ufshpb_normalization_work,
2129                           ufshpb_normalization_work_handler);
2130                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2131                                   ufshpb_read_to_handler);
2132         }
2133
2134         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2135                           sizeof(struct ufshpb_req), 0, 0, NULL);
2136         if (!hpb->map_req_cache) {
2137                 dev_err(hba->dev, "ufshpb(%d) failed to create ufshpb_req_cache",
2138                         hpb->lun);
2139                 return -ENOMEM;
2140         }
2141
2142         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2143                           sizeof(struct page *) * hpb->pages_per_srgn,
2144                           0, 0, NULL);
2145         if (!hpb->m_page_cache) {
2146                 dev_err(hba->dev, "ufshpb(%d) failed to create ufshpb_m_page_cache",
2147                         hpb->lun);
2148                 ret = -ENOMEM;
2149                 goto release_req_cache;
2150         }
2151
2152         ret = ufshpb_pre_req_mempool_init(hpb);
2153         if (ret) {
2154                 dev_err(hba->dev, "ufshpb(%d) failed to init pre_req mempool",
2155                         hpb->lun);
2156                 goto release_m_page_cache;
2157         }
2158
2159         ret = ufshpb_alloc_region_tbl(hba, hpb);
2160         if (ret)
2161                 goto release_pre_req_mempool;
2162
2163         ufshpb_stat_init(hpb);
2164         ufshpb_param_init(hpb);
2165
2166         if (hpb->is_hcm) {
2167                 unsigned int poll;
2168
2169                 poll = hpb->params.timeout_polling_interval_ms;
2170                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2171                                       msecs_to_jiffies(poll));
2172         }
2173
2174         return 0;
2175
2176 release_pre_req_mempool:
2177         ufshpb_pre_req_mempool_destroy(hpb);
2178 release_m_page_cache:
2179         kmem_cache_destroy(hpb->m_page_cache);
2180 release_req_cache:
2181         kmem_cache_destroy(hpb->map_req_cache);
2182         return ret;
2183 }
2184
2185 static struct ufshpb_lu *
2186 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2187                     struct ufshpb_dev_info *hpb_dev_info,
2188                     struct ufshpb_lu_info *hpb_lu_info)
2189 {
2190         struct ufshpb_lu *hpb;
2191         int ret;
2192
2193         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2194         if (!hpb)
2195                 return NULL;
2196
2197         hpb->lun = sdev->lun;
2198         hpb->sdev_ufs_lu = sdev;
2199
2200         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2201
2202         ret = ufshpb_lu_hpb_init(hba, hpb);
2203         if (ret) {
2204                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2205                 goto release_hpb;
2206         }
2207
2208         sdev->hostdata = hpb;
2209         return hpb;
2210
2211 release_hpb:
2212         kfree(hpb);
2213         return NULL;
2214 }
2215
2216 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2217 {
2218         struct ufshpb_region *rgn, *next_rgn;
2219         struct ufshpb_subregion *srgn, *next_srgn;
2220         unsigned long flags;
2221
2222         /*
2223          * If a device reset occurred, any HPB region information still queued
2224          * on the response lists may be stale. Discard those lists so that no
2225          * unnecessary work is done on outdated entries after the reset.
2226          */
2227         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2228         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2229                                  list_inact_rgn)
2230                 list_del_init(&rgn->list_inact_rgn);
2231
2232         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2233                                  list_act_srgn)
2234                 list_del_init(&srgn->list_act_srgn);
2235         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2236 }
2237
2238 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2239 {
2240         if (hpb->is_hcm) {
2241                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2242                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2243         }
2244         cancel_work_sync(&hpb->map_work);
2245 }
2246
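/*
 * Poll the fHpbReset flag up to HPB_RESET_REQ_RETRIES times, sleeping about
 * 1 ms between reads, and return its final value: false means the device
 * completed its HPB reset, true means the flag never cleared (or the query
 * itself failed).
 */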
2247 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2248 {
2249         int err = 0;
2250         bool flag_res = true;
2251         int try;
2252
2253         /* wait for the device to complete the HPB reset query */
2254         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2255                 dev_dbg(hba->dev,
2256                         "%s start flag reset polling %d times\n",
2257                         __func__, try);
2258
2259                 /* Poll the fHpbReset flag until the device clears it */
2260                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2261                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2262
2263                 if (err) {
2264                         dev_err(hba->dev,
2265                                 "%s reading fHpbReset flag failed with error %d\n",
2266                                 __func__, err);
2267                         return flag_res;
2268                 }
2269
2270                 if (!flag_res)
2271                         goto out;
2272
2273                 usleep_range(1000, 1100);
2274         }
2275         if (flag_res) {
2276                 dev_err(hba->dev,
2277                         "%s fHpbReset was not cleared by the device\n",
2278                         __func__);
2279         }
2280 out:
2281         return flag_res;
2282 }
2283
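/*
 * ufshpb_reset() moves back to HPB_PRESENT every LU that ufshpb_reset_host()
 * (below) put into HPB_RESET; ufshpb_reset_host() additionally cancels the
 * LU's work items and discards stale response lists.
 */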
2284 void ufshpb_reset(struct ufs_hba *hba)
2285 {
2286         struct ufshpb_lu *hpb;
2287         struct scsi_device *sdev;
2288
2289         shost_for_each_device(sdev, hba->host) {
2290                 hpb = ufshpb_get_hpb_data(sdev);
2291                 if (!hpb)
2292                         continue;
2293
2294                 if (ufshpb_get_state(hpb) != HPB_RESET)
2295                         continue;
2296
2297                 ufshpb_set_state(hpb, HPB_PRESENT);
2298         }
2299 }
2300
2301 void ufshpb_reset_host(struct ufs_hba *hba)
2302 {
2303         struct ufshpb_lu *hpb;
2304         struct scsi_device *sdev;
2305
2306         shost_for_each_device(sdev, hba->host) {
2307                 hpb = ufshpb_get_hpb_data(sdev);
2308                 if (!hpb)
2309                         continue;
2310
2311                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2312                         continue;
2313                 ufshpb_set_state(hpb, HPB_RESET);
2314                 ufshpb_cancel_jobs(hpb);
2315                 ufshpb_discard_rsp_lists(hpb);
2316         }
2317 }
2318
2319 void ufshpb_suspend(struct ufs_hba *hba)
2320 {
2321         struct ufshpb_lu *hpb;
2322         struct scsi_device *sdev;
2323
2324         shost_for_each_device(sdev, hba->host) {
2325                 hpb = ufshpb_get_hpb_data(sdev);
2326                 if (!hpb)
2327                         continue;
2328
2329                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2330                         continue;
2331                 ufshpb_set_state(hpb, HPB_SUSPEND);
2332                 ufshpb_cancel_jobs(hpb);
2333         }
2334 }
2335
2336 void ufshpb_resume(struct ufs_hba *hba)
2337 {
2338         struct ufshpb_lu *hpb;
2339         struct scsi_device *sdev;
2340
2341         shost_for_each_device(sdev, hba->host) {
2342                 hpb = ufshpb_get_hpb_data(sdev);
2343                 if (!hpb)
2344                         continue;
2345
2346                 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2347                     (ufshpb_get_state(hpb) != HPB_SUSPEND))
2348                         continue;
2349                 ufshpb_set_state(hpb, HPB_PRESENT);
2350                 ufshpb_kick_map_work(hpb);
2351                 if (hpb->is_hcm) {
2352                         unsigned int poll =
2353                                 hpb->params.timeout_polling_interval_ms;
2354
2355                         schedule_delayed_work(&hpb->ufshpb_read_to_work,
2356                                 msecs_to_jiffies(poll));
2357                 }
2358         }
2359 }
2360
2361 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2362                               struct ufshpb_lu_info *hpb_lu_info)
2363 {
2364         u16 max_active_rgns;
2365         u8 lu_enable;
2366         int size;
2367         int ret;
2368         char desc_buf[QUERY_DESC_MAX_SIZE];
2369
2370         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2371
2372         ufshcd_rpm_get_sync(hba);
2373         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2374                                             QUERY_DESC_IDN_UNIT, lun, 0,
2375                                             desc_buf, &size);
2376         ufshcd_rpm_put_sync(hba);
2377
2378         if (ret) {
2379                 dev_err(hba->dev,
2380                         "%s: idn: %d lun: %d query request failed",
2381                         __func__, QUERY_DESC_IDN_UNIT, lun);
2382                 return ret;
2383         }
2384
2385         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2386         if (lu_enable != LU_ENABLED_HPB_FUNC)
2387                 return -ENODEV;
2388
2389         max_active_rgns = get_unaligned_be16(
2390                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2391         if (!max_active_rgns) {
2392                 dev_err(hba->dev,
2393                         "lun %d wrong number of max active regions\n", lun);
2394                 return -ENODEV;
2395         }
2396
2397         hpb_lu_info->num_blocks = get_unaligned_be64(
2398                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2399         hpb_lu_info->pinned_start = get_unaligned_be16(
2400                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2401         hpb_lu_info->num_pinned = get_unaligned_be16(
2402                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2403         hpb_lu_info->max_active_rgns = max_active_rgns;
2404
2405         return 0;
2406 }
2407
2408 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2409 {
2410         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2411
2412         if (!hpb)
2413                 return;
2414
2415         ufshpb_set_state(hpb, HPB_FAILED);
2416
2417         sdev = hpb->sdev_ufs_lu;
2418         sdev->hostdata = NULL;
2419
2420         ufshpb_cancel_jobs(hpb);
2421
2422         ufshpb_pre_req_mempool_destroy(hpb);
2423         ufshpb_destroy_region_tbl(hpb);
2424
2425         kmem_cache_destroy(hpb->map_req_cache);
2426         kmem_cache_destroy(hpb->m_page_cache);
2427
2428         list_del_init(&hpb->list_hpb_lu);
2429
2430         kfree(hpb);
2431 }
2432
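/*
 * Called once every LU has finished ufshpb_init_hpb_lu(): shrink the global
 * mctx/page mempools if they are larger than the number of subregion pages
 * actually needed, then either mark each LU HPB_PRESENT (kicking map work
 * when the LU has pinned regions and, in device control mode, issuing an
 * unmap-all request) or tear the LU down if fHpbReset never cleared.
 */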
2433 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2434 {
2435         int pool_size;
2436         struct ufshpb_lu *hpb;
2437         struct scsi_device *sdev;
2438         bool init_success;
2439
2440         if (tot_active_srgn_pages == 0) {
2441                 ufshpb_remove(hba);
2442                 return;
2443         }
2444
2445         init_success = !ufshpb_check_hpb_reset_query(hba);
2446
2447         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2448         if (pool_size > tot_active_srgn_pages) {
2449                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2450                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2451         }
2452
2453         shost_for_each_device(sdev, hba->host) {
2454                 hpb = ufshpb_get_hpb_data(sdev);
2455                 if (!hpb)
2456                         continue;
2457
2458                 if (init_success) {
2459                         ufshpb_set_state(hpb, HPB_PRESENT);
2460                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2461                                 queue_work(ufshpb_wq, &hpb->map_work);
2462                         if (!hpb->is_hcm)
2463                                 ufshpb_issue_umap_all_req(hpb);
2464                 } else {
2465                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2466                         ufshpb_destroy_lu(hba, sdev);
2467                 }
2468         }
2469
2470         if (!init_success)
2471                 ufshpb_remove(hba);
2472 }
2473
2474 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2475 {
2476         struct ufshpb_lu *hpb;
2477         int ret;
2478         struct ufshpb_lu_info hpb_lu_info = { 0 };
2479         int lun = sdev->lun;
2480
2481         if (lun >= hba->dev_info.max_lu_supported)
2482                 goto out;
2483
2484         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2485         if (ret)
2486                 goto out;
2487
2488         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2489                                   &hpb_lu_info);
2490         if (!hpb)
2491                 goto out;
2492
2493         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2494                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2495
2496 out:
2497         /* Once all LUs have been initialized, finish device-wide HPB setup */
2498         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2499                 ufshpb_hpb_lu_prepared(hba);
2500 }
2501
2502 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2503 {
2504         int ret;
2505         unsigned int pool_size;
2506
2507         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2508                                         sizeof(struct ufshpb_map_ctx),
2509                                         0, 0, NULL);
2510         if (!ufshpb_mctx_cache) {
2511                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2512                 return -ENOMEM;
2513         }
2514
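        /*
         * Example: with the default ufshpb_host_map_kbytes of 2048 and
         * 4 KiB pages, pool_size below works out to 512.
         */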
2515         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2516         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2517                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2518
2519         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2520                                                     ufshpb_mctx_cache);
2521         if (!ufshpb_mctx_pool) {
2522                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2523                 ret = -ENOMEM;
2524                 goto release_mctx_cache;
2525         }
2526
2527         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2528         if (!ufshpb_page_pool) {
2529                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2530                 ret = -ENOMEM;
2531                 goto release_mctx_pool;
2532         }
2533
2534         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2535                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2536         if (!ufshpb_wq) {
2537                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2538                 ret = -ENOMEM;
2539                 goto release_page_pool;
2540         }
2541
2542         return 0;
2543
2544 release_page_pool:
2545         mempool_destroy(ufshpb_page_pool);
2546 release_mctx_pool:
2547         mempool_destroy(ufshpb_mctx_pool);
2548 release_mctx_cache:
2549         kmem_cache_destroy(ufshpb_mctx_cache);
2550         return ret;
2551 }
2552
2553 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2554 {
2555         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2556         int max_active_rgns = 0;
2557         int hpb_num_lu;
2558
2559         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2560         if (hpb_num_lu == 0) {
2561                 dev_err(hba->dev, "No HPB LU supported\n");
2562                 hpb_info->hpb_disabled = true;
2563                 return;
2564         }
2565
2566         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2567         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2568         max_active_rgns = get_unaligned_be16(geo_buf +
2569                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2570
2571         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2572             max_active_rgns == 0) {
2573                 dev_err(hba->dev, "No HPB supported device\n");
2574                 hpb_info->hpb_disabled = true;
2575                 return;
2576         }
2577 }
2578
2579 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2580 {
2581         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2582         int version, ret;
2583         int max_single_cmd;
2584
2585         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2586
2587         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2588         if ((version != HPB_SUPPORT_VERSION) &&
2589             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2590                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2591                         __func__, version);
2592                 hpb_dev_info->hpb_disabled = true;
2593                 return;
2594         }
2595
2596         if (version == HPB_SUPPORT_LEGACY_VERSION)
2597                 hpb_dev_info->is_legacy = true;
2598
2599         /*
2600          * Get the number of user logical units so we can check whether all
2601          * scsi_device instances have finished initialization.
2602          */
2603         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2604
2605         if (hpb_dev_info->is_legacy)
2606                 return;
2607
2608         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2609                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2610
2611         if (ret)
2612                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2613         else
2614                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2615 }
2616
2617 void ufshpb_init(struct ufs_hba *hba)
2618 {
2619         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2620         int try;
2621         int ret;
2622
2623         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2624                 return;
2625
2626         if (ufshpb_init_mem_wq(hba)) {
2627                 hpb_dev_info->hpb_disabled = true;
2628                 return;
2629         }
2630
2631         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2632         tot_active_srgn_pages = 0;
2633         /* issue HPB reset query */
2634         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2635                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2636                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2637                 if (!ret)
2638                         break;
2639         }
2640 }
2641
2642 void ufshpb_remove(struct ufs_hba *hba)
2643 {
2644         mempool_destroy(ufshpb_page_pool);
2645         mempool_destroy(ufshpb_mctx_pool);
2646         kmem_cache_destroy(ufshpb_mctx_cache);
2647
2648         destroy_workqueue(ufshpb_wq);
2649 }
2650
2651 module_param(ufshpb_host_map_kbytes, uint, 0644);
2652 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2653         "Size in kilobytes of the host mapping memory pool used by ufshpb");