/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

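/*
 * Build the ppa list for a multi-sector read: look up the L2P mapping
 * for each lba, serve cached sectors directly from the write buffer,
 * and collect the device-resident addresses in rqd->ppa_list. Sectors
 * resolved from the cache (or mapped to an empty ppa) are marked in
 * read_bitmap so they are not read from media.
 */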
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 sector_t blba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

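/*
 * Thin wrapper that translates the pblk_submit_io() return value into
 * the NVM_IO_* codes used by the read path.
 */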
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

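/*
 * Sanity check: the lba recorded in each sector's out-of-band metadata
 * must match the lba that was requested; a mismatch indicates a
 * corrupted or misdirected read.
 */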
static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
			   sector_t blba)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		WARN(lba != blba + i, "pblk: corrupted read LBA\n");
	}
}

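/*
 * Drop the per-line references taken while mapping this request. The
 * release callback (pblk_line_put_wq) defers the actual line put to a
 * workqueue.
 */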
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}

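/* Complete the user-facing bio once its data is in place */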
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
	bio_put(bio);
}

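/*
 * Common read completion: account the I/O, log errors, verify the
 * metadata lbas, complete the user bio if one is attached, optionally
 * drop line references, and free the request.
 */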
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	pblk_read_check(pblk, rqd, r_ctx->lba);

	bio_put(bio);
	if (r_ctx->private)
		pblk_end_user_read((struct bio *)r_ctx->private);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_read(pblk, rqd, true);
}

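/*
 * Handle a read that was only partially served from the write buffer:
 * read the missing ("hole") sectors synchronously from the device into
 * an internal bio, then copy them back into the original bio at the
 * positions left clear in read_bitmap.
 */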
static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	__le64 *lba_list_mem, *lba_list_media;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	if (unlikely(nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: sync read IO submission failed\n");
		goto err;
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	bio_endio(bio);
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_OK;

err:
	pr_err("pblk: failed to perform partial read\n");

	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

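/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): resolve one lba
 * through the L2P table and serve it from the write buffer if cached.
 */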
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

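/*
 * Entry point for user reads. Depending on how much of the request the
 * write buffer satisfies, the request is completed immediately, sent to
 * the device as a whole, or split through pblk_partial_read_bio().
 */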
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, blba, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

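/*
 * GC multi-sector lookup: keep only sectors whose L2P mapping still
 * points to the physical address the GC context expects; sectors
 * overwritten in the meantime are invalidated in the GC lists.
 */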
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

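/*
 * Single-sector GC lookup with the same mapping-consistency check as
 * read_ppalist_rq_gc(). Returns the number of valid sectors (0 or 1).
 */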
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

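/*
 * Read the victim sectors selected for garbage collection into
 * gc_rq->data using a synchronous device read.
 */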
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}