// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr ppas[NVM_MAX_VLBA];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
							advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

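/*
 * Sanity check of a sequential read: every lba stored in the out-of-band
 * sector metadata must match the lba that was requested, in order.
 */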
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_lba_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
	int i, j;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta_lba_list[j].lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
}

static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);

	if (int_bio)
		bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio);
	__pblk_end_io_read(pblk, rqd, true);
}

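/*
 * Completion path for a partial read: sectors that could not be served from
 * the write buffer were read from the device into an internal bio and are
 * copied here into the holes of the original bio before it is completed.
 */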
static void pblk_end_partial_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx = r_ctx->private;
	struct bio *new_bio = rqd->bio;
	struct bio *bio = pr_ctx->orig_bio;
	struct bio_vec src_bv, dst_bv;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	int bio_init_idx = pr_ctx->bio_init_idx;
	unsigned long *read_bitmap = pr_ctx->bitmap;
	int nr_secs = pr_ctx->orig_nr_secs;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	void *src_p, *dst_p;
	int hole, i;

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = pr_ctx->ppa_ptr;
		rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		pr_ctx->lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = pr_ctx->lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		struct pblk_line *line;

		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = pr_ctx->lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);
	kfree(pr_ctx);

	/* restore original request */
	rqd->bio = NULL;
	rqd->nr_ppas = nr_secs;

	bio_endio(bio);
	__pblk_end_io_read(pblk, rqd, false);
}

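/*
 * Prepare a partial read: allocate an internal bio with one page per hole in
 * the read bitmap and save enough context to restore the original request on
 * completion.
 */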
static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
				   unsigned int bio_init_idx,
				   unsigned long *read_bitmap,
				   int nr_holes)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx;
	struct bio *new_bio, *bio = r_ctx->private;
	int nr_secs = rqd->nr_ppas;
	int i;

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto fail_bio_put;

	if (nr_holes != new_bio->bi_vcnt) {
		WARN_ONCE(1, "pblk: malformed bio\n");
		goto fail_free_pages;
	}

	pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
	if (!pr_ctx)
		goto fail_free_pages;

	for (i = 0; i < nr_secs; i++)
		pr_ctx->lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;

	pr_ctx->orig_bio = bio;
	bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
	pr_ctx->bio_init_idx = bio_init_idx;
	pr_ctx->orig_nr_secs = nr_secs;
	r_ctx->private = pr_ctx;

	if (unlikely(nr_holes == 1)) {
		pr_ctx->ppa_ptr = rqd->ppa_list;
		pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}
	return 0;

fail_free_pages:
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_bio_put:
	bio_put(new_bio);

	return -ENOMEM;
}

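/*
 * Submit the device read for the sectors that were not found in the write
 * buffer. Completion is handled asynchronously by pblk_end_partial_read().
 */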
static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap, int nr_secs)
{
	int nr_holes, ret;

	nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_holes))
		return NVM_IO_ERR;

	rqd->end_io = pblk_end_partial_read;

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pblk_err(pblk, "partial read IO submission failed\n");
		goto err;
	}
	return NVM_IO_OK;

err:
	pblk_err(pblk, "failed to perform partial read\n");
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

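/*
 * Single-sector variant of pblk_read_ppalist_rq(): resolve one lba and either
 * serve it from the write buffer, mark it as empty, or leave it to be read
 * from the device.
 */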
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

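/*
 * Main entry point for the read path. A read request is served in one of
 * three ways: entirely from the write buffer, entirely from the device, or
 * partially from each, in which case the holes are filled by a partial read.
 */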
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
	int ret = NVM_IO_ERR;

	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
			      &pblk->disk->part0);

	bitmap_zero(read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->bio = NULL; /* cloned bio if needed */
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;
	r_ctx->private = bio; /* original bio */

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	if (pblk_alloc_rqd_meta(pblk, rqd))
		goto fail_rqd_free;

	if (nr_secs > 1)
		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
	else
		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);

	if (bitmap_full(read_bitmap, nr_secs)) {
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_DONE;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pblk_err(pblk, "could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;

		if (pblk_submit_io(pblk, rqd)) {
			pblk_err(pblk, "read IO submission failed\n");
			ret = NVM_IO_ERR;
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_secs);
	if (ret)
		goto fail_meta_free;

	return NVM_IO_OK;

fail_meta_free:
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

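/*
 * GC reads revalidate the L2P mapping before reading: sectors whose mapping
 * has moved since the line was selected for GC are dropped from the request.
 */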
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

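/*
 * Synchronous read used by the garbage collector to move valid data off a
 * line that has been selected for reclaim.
 */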
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
								PTR_ERR(bio));
		ret = PTR_ERR(bio);
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pblk_err(pblk, "GC read request failed\n");
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}