// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We do guarantee though that
 * if the value is read from the cache, it belongs to the mapped lba. In order
 * to guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

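/*
 * Resolve a multi-sector read against the L2P table. Sectors served from the
 * write buffer or mapped to an empty ppa are marked in read_bitmap; the rest
 * are collected in rqd->ppa_list to be read from the device.
 */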
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 unsigned long *read_bitmap)
{
	void *meta_list = rqd->meta_list;
	struct ppa_addr ppas[NVM_MAX_VLBA];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta->lba = addr_empty;

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta->lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

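/*
 * Sanity check for sequential reads: when the device exposes OOB metadata,
 * verify that the lba stored in each sector's metadata matches the lba that
 * was requested.
 */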
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	void *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	void *meta_lba_list = rqd->meta_list;
	int i, j;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   meta_lba_list, j);
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta->lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
								meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

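/* Complete the original user bio once the read has been served */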
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
}

static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);

	if (int_bio)
		bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio);
	__pblk_end_io_read(pblk, rqd, true);
}

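/*
 * Completion path for partial reads: copy the sectors read from the device
 * into the holes of the original bio and restore the original request state
 * before ending it.
 */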
static void pblk_end_partial_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx = r_ctx->private;
	struct pblk_sec_meta *meta;
	struct bio *new_bio = rqd->bio;
	struct bio *bio = pr_ctx->orig_bio;
	struct bio_vec src_bv, dst_bv;
	void *meta_list = rqd->meta_list;
	int bio_init_idx = pr_ctx->bio_init_idx;
	unsigned long *read_bitmap = pr_ctx->bitmap;
	int nr_secs = pr_ctx->orig_nr_secs;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	void *src_p, *dst_p;
	int hole, i;

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = pr_ctx->ppa_ptr;
		rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		meta = pblk_get_meta(pblk, meta_list, i);
		pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
		meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		struct pblk_line *line;

		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
		kref_put(&line->ref, pblk_line_put);

		meta = pblk_get_meta(pblk, meta_list, hole);
		meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);
	kfree(pr_ctx);

	/* restore original request */
	rqd->bio = NULL;
	rqd->nr_ppas = nr_secs;

	bio_endio(bio);
	__pblk_end_io_read(pblk, rqd, false);
}

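/*
 * Prepare a partial read: allocate an internal bio with one page per hole and
 * save enough context (pblk_pr_ctx) to reassemble the original bio on
 * completion.
 */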
static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
			    unsigned int bio_init_idx,
			    unsigned long *read_bitmap,
			    int nr_holes)
{
	void *meta_list = rqd->meta_list;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx;
	struct bio *new_bio, *bio = r_ctx->private;
	int nr_secs = rqd->nr_ppas;
	int i;

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto fail_bio_put;

	if (nr_holes != new_bio->bi_vcnt) {
		WARN_ONCE(1, "pblk: malformed bio\n");
		goto fail_free_pages;
	}

	pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
	if (!pr_ctx)
		goto fail_free_pages;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);

		pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;

	pr_ctx->orig_bio = bio;
	bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
	pr_ctx->bio_init_idx = bio_init_idx;
	pr_ctx->orig_nr_secs = nr_secs;
	r_ctx->private = pr_ctx;

	if (unlikely(nr_holes == 1)) {
		pr_ctx->ppa_ptr = rqd->ppa_list;
		pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}
	return 0;

fail_free_pages:
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_bio_put:
	bio_put(new_bio);

	return -ENOMEM;
}

static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap, int nr_secs)
{
	int nr_holes;
	int ret;

	nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);

	if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_holes))
		return NVM_IO_ERR;

	rqd->end_io = pblk_end_partial_read;

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pblk_err(pblk, "partial read IO submission failed\n");
		goto err;
	}

	return NVM_IO_OK;

err:
	pblk_err(pblk, "failed to perform partial read\n");

	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

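/* Single-sector read: resolve one lba against the write buffer or the media */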
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta->lba = addr_empty;
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

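/*
 * Main entry point for user reads. Depending on how many sectors could be
 * served from the write buffer, the request is either completed immediately,
 * submitted to the device as-is, or turned into a partial read.
 */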
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
	int ret = NVM_IO_ERR;

	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
			      &pblk->disk->part0);

	bitmap_zero(read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->bio = NULL; /* cloned bio if needed */
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;
	r_ctx->private = bio; /* original bio */

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	if (pblk_alloc_rqd_meta(pblk, rqd))
		goto fail_rqd_free;

	if (nr_secs > 1)
		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
	else
		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);

	if (bitmap_full(read_bitmap, nr_secs)) {
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_DONE;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pblk_err(pblk, "could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;

		if (pblk_submit_io(pblk, rqd)) {
			pblk_err(pblk, "read IO submission failed\n");
			ret = NVM_IO_ERR;
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_secs);
	if (ret)
		goto fail_meta_free;

	return NVM_IO_OK;

fail_meta_free:
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

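/*
 * GC read helpers: only sectors whose current L2P mapping still points to the
 * line being garbage collected are read back; stale mappings are dropped from
 * the GC request.
 */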
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

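/* Synchronous read of valid sectors on behalf of the garbage collector */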
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
								PTR_ERR(bio));
		ret = PTR_ERR(bio);
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pblk_err(pblk, "GC read request failed\n");
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}