// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single-threaded, as the L2P table is updated in
 * order following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"

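/*
 * Sanity-check an end-of-line metadata (emeta) buffer: the stored CRC must
 * match the CRC computed over the buffer, and the header must carry the
 * pblk magic identifier. Returns 0 if the buffer is valid, 1 otherwise.
 */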
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return 1;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return 1;

        return 0;
}

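/*
 * Rebuild the portion of the L2P table covered by a closed line from the
 * LBA list stored in emeta: walk every data sector between smeta and the
 * start of emeta, skip sectors on bad blocks, invalidate sectors whose
 * entry is ADDR_EMPTY and map the rest. A mismatch between the number of
 * valid LBAs announced in emeta and the number actually mapped is
 * reported but not treated as fatal.
 */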
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        u64 data_start, data_end;
        u64 nr_valid_lbas, nr_lbas = 0;
        u64 i;

        lba_list = emeta_to_lbas(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        data_end = line->emeta_ssec;
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < data_end; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate:\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pblk_err(pblk, "line %d - inconsistent lba list (%llu/%llu)\n",
                                line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}

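/*
 * Advance the line's write pointer over the sectors that were already
 * written, allocating pages in min_write_pgs increments.
 */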
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
                                u64 written_secs)
{
        int i;

        for (i = 0; i < written_secs; i += pblk->min_write_pgs)
                pblk_alloc_page(pblk, line, pblk->min_write_pgs);
}

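/*
 * Sum the write pointers of all online chunks in the line to obtain the
 * number of sectors written to it so far, and cross-check the online
 * chunk count against the bad-block bitmap. The line's write pointer is
 * then advanced over the written sectors, minus the smeta region.
 */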
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        u64 written_secs = 0;
        int valid_chunks = 0;
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct nvm_chk_meta *chunk = &line->chks[i];

                if (chunk->state & NVM_CHK_ST_OFFLINE)
                        continue;

                written_secs += chunk->wp;
                valid_chunks++;
        }

        if (lm->blk_per_line - nr_bb != valid_chunks)
                pblk_err(pblk, "recovery line %d is bad\n", line->id);

        pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

        return written_secs;
}

struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};

static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}

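/*
 * Completion path for a padding write: release the chunk semaphore, free
 * the request, and drop the pad reference so that the last completion
 * wakes up the waiter in pblk_recov_pad_line().
 */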
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;

        pblk_up_chunk(pblk, ppa_list[0]);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
}

/* Pad the line up to @left_ppas sectors, using the line bitmap to skip
 * bad blocks.
 */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
                               int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        void *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (rq_ppas < pblk->min_write_pgs) {
                pblk_err(pblk, "corrupted pad line %d\n", line->id);
                ret = -EINVAL;
                goto fail_free_pad;
        }

        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_pad;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        ret = pblk_alloc_rqd_meta(pblk, rqd);
        if (ret)
                goto fail_free_rqd;

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
        rqd->nr_ppas = rq_ppas;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        meta_list = rqd->meta_list;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        struct pblk_sec_meta *meta;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = addr_empty;
                        meta = pblk_get_meta(pblk, meta_list, i);
                        meta->lba = addr_empty;
                        rqd->ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);
        pblk_down_chunk(pblk, rqd->ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, rqd->ppa_list[0]);
                goto fail_free_rqd;
        }

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

        kref_put(&pad_rq->ref, pblk_recov_complete);

        if (!wait_for_completion_io_timeout(&pad_rq->wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pblk_err(pblk, "pad write timed out\n");
                ret = -ETIME;
        }

        if (!pblk_line_is_full(line))
                pblk_err(pblk, "corrupted padded line: %d\n", line->id);

        vfree(data);
free_rq:
        kfree(pad_rq);
        return ret;

fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        bio_put(bio);
fail_free_pad:
        kfree(pad_rq);
        vfree(data);
        return ret;
}

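/*
 * Number of sectors to pad before reads can be retried, presumably sized
 * so that previously written data has moved out of the device's cache
 * window (mw_cunits per LUN, in ws_opt units), capped at what is left in
 * the line.
 */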
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

        return (distance > line->left_msecs) ? line->left_msecs : distance;
}

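/*
 * A line is considered balanced when, walking the LUNs in order, no
 * chunk's write pointer is ahead of the smallest write pointer seen so
 * far. An unbalanced line likely means the line was interrupted
 * mid-write.
 */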
static int pblk_line_wp_is_unbalanced(struct pblk *pblk,
                                      struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_lun *rlun;
        struct nvm_chk_meta *chunk;
        struct ppa_addr ppa;
        u64 line_wp;
        int pos, i;

        rlun = &pblk->luns[0];
        ppa = rlun->bppa;
        pos = pblk_ppa_to_pos(geo, ppa);
        chunk = &line->chks[pos];

        line_wp = chunk->wp;

        for (i = 1; i < lm->blk_per_line; i++) {
                rlun = &pblk->luns[i];
                ppa = rlun->bppa;
                pos = pblk_ppa_to_pos(geo, ppa);
                chunk = &line->chks[pos];

                if (chunk->wp > line_wp)
                        return 1;
                else if (chunk->wp < line_wp)
                        line_wp = chunk->wp;
        }

        return 0;
}

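/*
 * Recover the L2P mapping of an open line by reading its data area back
 * and harvesting the LBAs stored in the per-sector out-of-band metadata.
 * On a read error the line is padded once and the request retried; a
 * second failure on the same request aborts the scan.
 */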
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list;
        u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        bool padded = false;
        int rq_ppas, rq_len;
        int i, j;
        int ret;
        u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

        if (pblk_line_wp_is_unbalanced(pblk, line))
                pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->csecs;

retry_rq:
        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        bio_get(bio);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->is_seq = 1;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr + j, line->id);
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                bio_put(bio);
                return ret;
        }

        atomic_dec(&pblk->inflight_io);

        /* If a read fails, make a best effort by padding the line
         * and retrying.
         */
        if (rqd->error) {
                int pad_distance, ret;

                if (padded) {
                        pblk_log_read_err(pblk, rqd);
                        bio_put(bio);
                        return -EINTR;
                }

                pad_distance = pblk_pad_distance(pblk, line);
                ret = pblk_recov_pad_line(pblk, line, pad_distance);
                if (ret) {
                        bio_put(bio);
                        return ret;
                }

                padded = true;
                bio_put(bio);
                goto retry_rq;
        }

        pblk_get_packed_meta(pblk, rqd);
        bio_put(bio);

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                u64 lba = le64_to_cpu(meta->lba);

                lba_list[paddr++] = cpu_to_le64(lba);

                if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                        continue;

                line->nr_valid_lbas++;
                pblk_update_map(pblk, lba, rqd->ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(padded && !pblk_line_is_full(line));
#endif

        return 0;
}

/* Scan the line for LBAs stored in the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int ret = 0;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

        data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p);
        if (ret) {
                pblk_err(pblk, "could not recover L2P from OOB\n");
                goto out;
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        mempool_free(rqd, &pblk->r_rq_pool);
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

        return ret;
}

/* Insert lines into the list ordered by their sequence number (seq_nr) */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}

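/*
 * Compute the sector at which emeta starts by walking backwards from the
 * end of the line, counting only sectors that do not fall on bad blocks.
 */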
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int emeta_secs;
        u64 emeta_start;
        struct ppa_addr ppa;
        int pos;

        emeta_secs = lm->emeta_sec[0];
        emeta_start = lm->sec_per_line;

        while (emeta_secs) {
                emeta_start--;
                ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);
                if (!test_bit(pos, line->blk_bitmap))
                        emeta_secs--;
        }

        return emeta_start;
}

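/*
 * Reject lines whose emeta major version differs from the one this driver
 * implements; newer minor versions are tolerated (and only logged when
 * CONFIG_NVM_PBLK_DEBUG is set).
 */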
static int pblk_recov_check_line_version(struct pblk *pblk,
                                         struct line_emeta *emeta)
{
        struct line_header *header = &emeta->header;

        if (header->version_major != EMETA_VERSION_MAJOR) {
                pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
                         header->version_major, EMETA_VERSION_MAJOR);
                return 1;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (header->version_minor > EMETA_VERSION_MINOR)
                pblk_info(pblk, "newer line minor version found: %d\n",
                                header->version_minor);
#endif

        return 0;
}

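/*
 * Restore the write-amplification counters from emeta, provided the line
 * was written with emeta version 0.2 or later.
 */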
static void pblk_recov_wa_counters(struct pblk *pblk,
                                   struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct line_header *header = &emeta->header;
        struct wa_counters *wa = emeta_to_wa(lm, emeta);

        /* WA counters were introduced in emeta version 0.2 */
        if (header->version_major > 0 || header->version_minor >= 2) {
                u64 user = le64_to_cpu(wa->user);
                u64 pad = le64_to_cpu(wa->pad);
                u64 gc = le64_to_cpu(wa->gc);

                atomic64_set(&pblk->user_wa, user);
                atomic64_set(&pblk->pad_wa, pad);
                atomic64_set(&pblk->gc_wa, gc);

                pblk->user_rst_wa = user;
                pblk->pad_rst_wa = pad;
                pblk->gc_rst_wa = gc;
        }
}

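/*
 * A line is considered written if it is not marked bad and the chunk
 * holding smeta (on the first non-bad block) is no longer in the free
 * state.
 */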
static int pblk_line_was_written(struct pblk_line *line,
                                 struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct ppa_addr bppa;
        int smeta_blk;

        if (line->state == PBLK_LINESTATE_BAD)
                return 0;

        smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (smeta_blk >= lm->blk_per_line)
                return 0;

        bppa = pblk->luns[smeta_blk].bppa;
        chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

        if (chunk->state & NVM_CHK_ST_FREE)
                return 0;

        return 1;
}

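/* A line is open if any of its chunks is still in the open state. */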
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        for (i = 0; i < lm->blk_per_line; i++)
                if (line->chks[i].state & NVM_CHK_ST_OPEN)
                        return true;

        return false;
}

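/*
 * Scan-based recovery entry point: read smeta from every line, order the
 * valid lines by sequence number, then rebuild the L2P table line by
 * line, from emeta when it is available and valid, and from the OOB area
 * otherwise. Returns the open data line to resume writing on, NULL if
 * there is none, or an ERR_PTR on an incompatible metadata version.
 */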
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                if (!pblk_line_was_written(line, pblk))
                        continue;

                /* Lines that cannot be read are assumed not to be written */
                if (pblk_line_smeta_read(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
                        pblk_err(pblk, "found incompatible line version %u\n",
                                        smeta_buf->header.version_major);
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
                        valid_uuid = 1;
                }

                if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
                        pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
                                        i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
                                                line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                pblk_setup_uuid(pblk);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of the L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                recovered_lines++;

                line->emeta_ssec = pblk_line_emeta_start(pblk, line);
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_is_open(pblk, line)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_line_version(pblk, line->emeta->buf))
                        return ERR_PTR(-EINVAL);

                pblk_recov_wa_counters(pblk, line->emeta->buf);

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_OPEN;
                        spin_unlock(&line->lock);

                        line->emeta->mem = 0;
                        atomic_set(&line->emeta->sync, 0);

                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                        data_line = line;
                        line->meta_line = meta_line;

                        open_lines++;
                }
        }

        if (!open_lines) {
                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);
        } else {
                spin_lock(&l_mg->free_lock);
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
                spin_unlock(&l_mg->free_lock);
        }

        if (is_next)
                pblk_line_erase(pblk, l_mg->data_next);

out:
        if (found_lines != recovered_lines)
                pblk_err(pblk, "failed to recover all found lines %d/%d\n",
                                                found_lines, recovered_lines);

        return data_line;
}

/*
 * Pad the current data line on tear down, so that it can be closed cleanly.
 */
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_line(pblk, line, left_msecs);
        if (ret) {
                pblk_err(pblk, "tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}