drivers/lightnvm/pblk-read.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
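/*
 * Note: in practice the ordering point is typically an upper-layer flush
 * request (e.g. a bio carrying REQ_PREFLUSH), which pblk's write path uses
 * as a synchronization point for the write buffer.
 */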
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                                bio_iter, advanced_bio);
}

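/*
 * Per-request sector classification: every sector satisfied from the write
 * buffer (or mapping to an empty L2P entry) gets its bit set in read_bitmap;
 * only the remaining sectors are placed in rqd->ppa_list and read from the
 * device. For example, with nr_secs = 4 and read_bitmap = 0b0101, sectors 0
 * and 2 were copied from cache and sectors 1 and 3 still need a media read.
 */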
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 struct bio *bio, sector_t blba,
                                 unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                                advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

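/*
 * Sanity check on a completed sequential read: the lba stored in each
 * sector's out-of-band metadata must match the lba the host requested. A
 * mismatch indicates a corrupted mapping or a misdirected read; it is
 * reported but not repaired.
 */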
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                sector_t blba)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_lba_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                if (lba != blba + i) {
#ifdef CONFIG_NVM_DEBUG
                        struct ppa_addr *p;

                        /* single-sector requests use ppa_addr, not ppa_list */
                        p = (nr_lbas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[i];
                        print_ppa(&pblk->dev->geo, p, "seq", i);
#endif
                        pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
                                                        lba, (u64)blba + i);
                        WARN_ON(1);
                }
        }
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                 u64 *lba_list, int nr_lbas)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int i, j;

        for (i = 0, j = 0; i < nr_lbas; i++) {
                u64 lba = lba_list[i];
                u64 meta_lba;

                if (lba == ADDR_EMPTY)
                        continue;

                meta_lba = le64_to_cpu(meta_lba_list[j].lba);

                if (lba != meta_lba) {
#ifdef CONFIG_NVM_DEBUG
                        struct ppa_addr *p;
                        int nr_ppas = rqd->nr_ppas;

                        p = (nr_ppas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[j];
                        print_ppa(&pblk->dev->geo, p, "rnd", j);
#endif
                        pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
                                                                lba, meta_lba);
                        WARN_ON(1);
                }

                j++;
        }

        WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

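/*
 * Reads hold a reference on every line they read from the device so that the
 * line cannot be recycled (erased) while the I/O is in flight; those
 * references are dropped here once the request completes.
 */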
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list;
        int i;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr ppa = ppa_list[i];
                struct pblk_line *line;

                line = &pblk->lines[pblk_ppa_to_line(ppa)];
                kref_put(&line->ref, pblk_line_put_wq);
        }
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
}

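/*
 * Common completion path. put_line is false when the line references have
 * already been dropped (partial reads) or were never taken (fully cached
 * reads), and true when this completion is responsible for dropping them.
 */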
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *int_bio = rqd->bio;
        unsigned long start_time = r_ctx->start_time;

        generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);

        pblk_read_check_seq(pblk, rqd, r_ctx->lba);

        if (int_bio)
                bio_put(int_bio);

        if (put_line)
                pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = (struct bio *)r_ctx->private;

        pblk_end_user_read(bio);
        __pblk_end_io_read(pblk, rqd, true);
}

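/*
 * Partial read: some sectors of the original bio were already filled from
 * the write buffer, so only the remaining "holes" are read from the device.
 * A new bio is allocated for the holes, submitted synchronously, and its
 * pages are then copied back into the original bio at the right offsets.
 */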
static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
                             struct bio *orig_bio, unsigned int bio_init_idx,
                             unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *new_bio;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        __le64 *lba_list_mem, *lba_list_media;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto fail_add_pages;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto fail;
        }

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        if (unlikely(nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: sync read IO submission failed\n");
                goto fail;
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
                struct pblk_line *line = &pblk->lines[line_id];

                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* restore original request */
        rqd->bio = NULL;
        rqd->nr_ppas = nr_secs;

        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_DONE;

fail:
        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_add_pages:
        pr_err("pblk: failed to perform partial read\n");
        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}

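/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): one L2P lookup, after
 * which the sector is either copied from the write buffer (bit 0 of
 * read_bitmap set) or scheduled as a device read through rqd->ppa_addr.
 */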
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, true)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

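/*
 * Main entry point for host reads. After the L2P lookup there are three
 * possible outcomes:
 *   - read_bitmap full:  every sector came from the write buffer; complete
 *     the bio immediately (NVM_IO_DONE).
 *   - read_bitmap empty: clone the bio and submit the whole request to the
 *     device asynchronously (NVM_IO_OK).
 *   - otherwise:         fall back to pblk_partial_read() to fill the holes.
 */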
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct request_queue *q = dev->q;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        int ret = NVM_IO_ERR;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
                                        (unsigned long long)blba, nr_secs);
                return NVM_IO_ERR;
        }

        generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->nr_ppas = nr_secs;
        rqd->bio = NULL; /* cloned bio if needed */
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->start_time = jiffies;
        r_ctx->lba = blba;
        r_ctx->private = bio; /* original bio */

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap);
        }

        if (bitmap_full(&read_bitmap, nr_secs)) {
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_DONE;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        goto fail_end_io;
                }

                rqd->bio = int_bio;

                if (pblk_submit_io(pblk, rqd)) {
                        pr_err("pblk: read IO submission failed\n");
                        ret = NVM_IO_ERR;
                        goto fail_end_io;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap);

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
fail_end_io:
        __pblk_end_io_read(pblk, rqd, false);
        return ret;
}

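/*
 * GC reads re-validate every candidate sector against the current L2P table:
 * if a sector no longer lives at the physical address recorded when the line
 * was selected for GC (i.e. the host has rewritten it), it is dropped from
 * the request instead of being relocated.
 */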
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

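/*
 * GC read entry point. Unlike host reads, this path is synchronous, uses an
 * on-stack nvm_rq and reads into the GC buffer (gc_rq->data) rather than a
 * user bio, so no write-buffer lookup or partial-read handling is needed.
 */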
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        if (gc_rq->nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                        gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
                                                        gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = gc_rq->secs_to_gc * geo->csecs;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                /* propagate the error instead of returning NVM_IO_OK */
                ret = PTR_ERR(bio);
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;

        if (pblk_submit_io_sync(pblk, &rqd)) {
                ret = -EIO;
                pr_err("pblk: GC read request failed\n");
                goto err_free_bio;
        }

        pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}