// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single threaded as the L2P table is updated in order
 * following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"

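/*
 * Sanity-check a line's end metadata: recompute the emeta CRC and verify the
 * PBLK_MAGIC identifier. Returns 0 when the emeta is usable, 1 otherwise.
 */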
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return 1;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return 1;

        return 0;
}

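/*
 * Rebuild the L2P entries of a closed line from the LBA list stored in its
 * emeta. Sectors holding ADDR_EMPTY are accounted as invalidated; all others
 * are mapped back into the L2P table, and the recovered count is cross-checked
 * against emeta's nr_valid_lbas.
 */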
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        u64 data_start, data_end;
        u64 nr_valid_lbas, nr_lbas = 0;
        u64 i;

        lba_list = emeta_to_lbas(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        data_end = line->emeta_ssec;
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < data_end; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate:\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
                                line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}

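/*
 * Move the line's internal write pointer past the written_secs sectors found
 * on the device and update the count of sectors still to be written
 * (left_msecs) accordingly.
 */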
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
                                u64 written_secs)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        for (i = 0; i < written_secs; i += pblk->min_write_pgs)
                __pblk_alloc_page(pblk, line, pblk->min_write_pgs);

        spin_lock(&l_mg->free_lock);
        if (written_secs > line->left_msecs) {
                /*
                 * We have all data sectors written
                 * and some emeta sectors written too.
                 */
                line->left_msecs = 0;
        } else {
                /* We have only some data sectors written. */
                line->left_msecs -= written_secs;
        }
        spin_unlock(&l_mg->free_lock);
}

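/*
 * Count the sectors already written to an open line by summing the write
 * pointers of all non-offline chunks, then advance the line's write pointer
 * past them (minus the smeta sectors). Logs an error if the number of usable
 * chunks disagrees with the bad-block bitmap.
 */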
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        u64 written_secs = 0;
        int valid_chunks = 0;
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct nvm_chk_meta *chunk = &line->chks[i];

                if (chunk->state & NVM_CHK_ST_OFFLINE)
                        continue;

                written_secs += chunk->wp;
                valid_chunks++;
        }

        if (lm->blk_per_line - nr_bb != valid_chunks)
                pblk_err(pblk, "recovery line %d is bad\n", line->id);

        pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

        return written_secs;
}

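/*
 * Resources pre-allocated by pblk_recov_l2p_from_oob() and handed to the OOB
 * scan: PPA list, OOB metadata, request, data buffer and their DMA addresses.
 */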
struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};

static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}

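/*
 * Completion path for padding writes: release the chunk semaphore, free the
 * request and drop the pad_rq reference; the last put wakes up the waiter in
 * pblk_recov_pad_line().
 */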
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;

        pblk_up_chunk(pblk, ppa_list[0]);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
}

/*
 * Pad a partially written line using the line bitmap: allocate the remaining
 * write pages, mark them invalid (ADDR_EMPTY) in the L2P and emeta LBA list,
 * and submit padding writes until the requested sector count or the line is
 * exhausted.
 */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
                               int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        void *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        void *data;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (rq_ppas < pblk->min_write_pgs) {
                pblk_err(pblk, "corrupted pad line %d\n", line->id);
                goto fail_complete;
        }

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        ret = pblk_alloc_rqd_meta(pblk, rqd);
        if (ret) {
                pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
                goto fail_complete;
        }

        rqd->bio = NULL;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
        rqd->nr_ppas = rq_ppas;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        ppa_list = nvm_rq_to_ppa_list(rqd);
        meta_list = rqd->meta_list;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        struct pblk_sec_meta *meta;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = addr_empty;
                        meta = pblk_get_meta(pblk, meta_list, i);
                        meta->lba = addr_empty;
                        ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);
        pblk_down_chunk(pblk, ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd, data);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, ppa_list[0]);
                kref_put(&pad_rq->ref, pblk_recov_complete);
                pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
                goto fail_complete;
        }

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

fail_complete:
        kref_put(&pad_rq->ref, pblk_recov_complete);
        wait_for_completion(&pad_rq->wait);

        if (!pblk_line_is_full(line))
                pblk_err(pblk, "corrupted padded line: %d\n", line->id);

        vfree(data);
free_rq:
        kfree(pad_rq);
        return ret;
}

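/*
 * Number of sectors to pad after a failed read: mw_cunits worth of
 * optimal-size writes on every LUN (likely enough to flush the device's
 * cached window), capped by the sectors left in the line.
 */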
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

        return (distance > line->left_msecs) ? line->left_msecs : distance;
}

/* Return a chunk belonging to a line by stripe (write order) index */
static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
                                                  struct pblk_line *line,
                                                  int index)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        struct ppa_addr ppa;
        int pos;

        rlun = &pblk->luns[index];
        ppa = rlun->bppa;
        pos = pblk_ppa_to_pos(geo, ppa);

        return &line->chks[pos];
}

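/*
 * A line's write pointers are considered unbalanced when any good chunk's
 * write pointer is ahead of the first good chunk's, or more than one
 * maximum-size write (max_write_pgs) behind it.
 */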
static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
                                      struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = lm->blk_per_line;
        struct nvm_chk_meta *chunk;
        u64 max_wp, min_wp;
        int i;

        i = find_first_zero_bit(line->blk_bitmap, blk_in_line);

        /* If there are zero or one good chunks in the line,
         * the write pointers can't be unbalanced.
         */
        if (i >= (blk_in_line - 1))
                return 0;

        chunk = pblk_get_stripe_chunk(pblk, line, i);
        max_wp = chunk->wp;
        if (max_wp > pblk->max_write_pgs)
                min_wp = max_wp - pblk->max_write_pgs;
        else
                min_wp = 0;

        i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
        while (i < blk_in_line) {
                chunk = pblk_get_stripe_chunk(pblk, line, i);
                if (chunk->wp > max_wp || chunk->wp < min_wp)
                        return 1;

                i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
        }

        return 0;
}

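/*
 * Recover an open line's L2P entries by reading its sectors back and taking
 * each sector's LBA from the out-of-band metadata. A failed read triggers a
 * single round of padding followed by a retry; a second failure aborts the
 * scan.
 */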
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list;
        u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        bool padded = false;
        int rq_ppas;
        int i, j;
        int ret;
        u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

        if (pblk_line_wps_are_unbalanced(pblk, line))
                pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;

retry_rq:
        rqd->bio = NULL;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;
        ppa_list = nvm_rq_to_ppa_list(rqd);

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->is_seq = 1;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++)
                        ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr + j, line->id);
        }

        ret = pblk_submit_io_sync(pblk, rqd, data);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                return ret;
        }

        atomic_dec(&pblk->inflight_io);

        /*
         * If a read fails, make a best-effort recovery by padding the line
         * and retrying.
         */
        if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
                int pad_distance, ret;

                if (padded) {
                        pblk_log_read_err(pblk, rqd);
                        return -EINTR;
                }

                pad_distance = pblk_pad_distance(pblk, line);
                ret = pblk_recov_pad_line(pblk, line, pad_distance);
                if (ret)
                        return ret;

                padded = true;
                goto retry_rq;
        }

        pblk_get_packed_meta(pblk, rqd);

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                u64 lba = le64_to_cpu(meta->lba);

                lba_list[paddr++] = cpu_to_le64(lba);

                if (lba == ADDR_EMPTY || lba >= pblk->capacity)
                        continue;

                line->nr_valid_lbas++;
                pblk_update_map(pblk, lba, ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(padded && !pblk_line_is_full(line));
#endif

        return 0;
}

/* Scan the line for LBAs in the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int ret = 0;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

        data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p);
        if (ret) {
                pblk_err(pblk, "could not recover L2P from OOB\n");
                goto out;
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        mempool_free(rqd, &pblk->r_rq_pool);
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

        return ret;
}

/* Insert lines on the list ordered by sequence number (seq_nr) */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}

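/*
 * Locate the first emeta sector by walking backwards from the end of the
 * line, skipping sectors that fall on bad blocks.
 */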
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int emeta_secs;
        u64 emeta_start;
        struct ppa_addr ppa;
        int pos;

        emeta_secs = lm->emeta_sec[0];
        emeta_start = lm->sec_per_line;

        while (emeta_secs) {
                emeta_start--;
                ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);
                if (!test_bit(pos, line->blk_bitmap))
                        emeta_secs--;
        }

        return emeta_start;
}

static int pblk_recov_check_line_version(struct pblk *pblk,
                                         struct line_emeta *emeta)
{
        struct line_header *header = &emeta->header;

        if (header->version_major != EMETA_VERSION_MAJOR) {
                pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
                         header->version_major, EMETA_VERSION_MAJOR);
                return 1;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (header->version_minor > EMETA_VERSION_MINOR)
                pblk_info(pblk, "newer line minor version found: %d\n",
                                header->version_minor);
#endif

        return 0;
}

static void pblk_recov_wa_counters(struct pblk *pblk,
                                   struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct line_header *header = &emeta->header;
        struct wa_counters *wa = emeta_to_wa(lm, emeta);

        /* WA counters were introduced in emeta version 0.2 */
        if (header->version_major > 0 || header->version_minor >= 2) {
                u64 user = le64_to_cpu(wa->user);
                u64 pad = le64_to_cpu(wa->pad);
                u64 gc = le64_to_cpu(wa->gc);

                atomic64_set(&pblk->user_wa, user);
                atomic64_set(&pblk->pad_wa, pad);
                atomic64_set(&pblk->gc_wa, gc);

                pblk->user_rst_wa = user;
                pblk->pad_rst_wa = pad;
                pblk->gc_rst_wa = gc;
        }
}

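/*
 * A line is considered written when the chunk holding its smeta is closed,
 * or is open with a write pointer at or past the smeta sectors.
 */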
static int pblk_line_was_written(struct pblk_line *line,
                                 struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct ppa_addr bppa;
        int smeta_blk;

        if (line->state == PBLK_LINESTATE_BAD)
                return 0;

        smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (smeta_blk >= lm->blk_per_line)
                return 0;

        bppa = pblk->luns[smeta_blk].bppa;
        chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

        if (chunk->state & NVM_CHK_ST_CLOSED ||
            (chunk->state & NVM_CHK_ST_OPEN
             && chunk->wp >= lm->smeta_sec))
                return 1;

        return 0;
}

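/* A line is open if any of its chunks is in the open state */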
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        for (i = 0; i < lm->blk_per_line; i++)
                if (line->chks[i].state & NVM_CHK_ST_OPEN)
                        return true;

        return false;
}

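/*
 * Scan-based L2P recovery: read smeta from every line, order the written
 * lines by sequence number, then rebuild each line's portion of the L2P
 * table, using emeta when the line is closed and its emeta is valid, and the
 * OOB area otherwise. Returns the open data line to resume writes on (NULL
 * if none), or an ERR_PTR on incompatible metadata versions.
 */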
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                if (!pblk_line_was_written(line, pblk))
                        continue;

                /* Lines that cannot be read are assumed not written here */
                if (pblk_line_smeta_read(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
                        pblk_err(pblk, "found incompatible line version %u\n",
                                        smeta_buf->header.version_major);
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        import_guid(&pblk->instance_uuid, smeta_buf->header.uuid);
                        valid_uuid = 1;
                }

                if (!guid_equal(&pblk->instance_uuid,
                                (guid_t *)&smeta_buf->header.uuid)) {
                        pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
                                        i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
                                                line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                guid_gen(&pblk->instance_uuid);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of the L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                recovered_lines++;

                line->emeta_ssec = pblk_line_emeta_start(pblk, line);
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_is_open(pblk, line)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_line_version(pblk, line->emeta->buf))
                        return ERR_PTR(-EINVAL);

                pblk_recov_wa_counters(pblk, line->emeta->buf);

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_OPEN;
                        spin_unlock(&line->lock);

                        line->emeta->mem = 0;
                        atomic_set(&line->emeta->sync, 0);

                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                        data_line = line;
                        line->meta_line = meta_line;

                        open_lines++;
                }
        }

        if (!open_lines) {
                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);
        } else {
                spin_lock(&l_mg->free_lock);
                l_mg->data_line = data_line;
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
                spin_unlock(&l_mg->free_lock);
        }

        if (is_next)
                pblk_line_erase(pblk, l_mg->data_next);

out:
        if (found_lines != recovered_lines)
                pblk_err(pblk, "failed to recover all found lines %d/%d\n",
                                                found_lines, recovered_lines);

        return data_line;
}

/*
 * Pad the current data line on tear down.
 */
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_line(pblk, line, left_msecs);
        if (ret) {
                pblk_err(pblk, "tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}