Merge tag 'kconfig-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy...
[linux-2.6-microblaze.git] / drivers / lightnvm / pblk-recovery.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016 CNEX Labs
4  * Initial: Javier Gonzalez <javier@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-recovery.c - pblk's recovery path
16  *
17  * The L2P recovery path is single threaded as the L2P table is updated in order
18  * following the line sequence ID.
19  */
20
21 #include "pblk.h"
22 #include "pblk-trace.h"
23
24 int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
25 {
26         u32 crc;
27
28         crc = pblk_calc_emeta_crc(pblk, emeta_buf);
29         if (le32_to_cpu(emeta_buf->crc) != crc)
30                 return 1;
31
32         if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
33                 return 1;
34
35         return 0;
36 }
37
38 static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
39 {
40         struct nvm_tgt_dev *dev = pblk->dev;
41         struct nvm_geo *geo = &dev->geo;
42         struct pblk_line_meta *lm = &pblk->lm;
43         struct pblk_emeta *emeta = line->emeta;
44         struct line_emeta *emeta_buf = emeta->buf;
45         __le64 *lba_list;
46         u64 data_start, data_end;
47         u64 nr_valid_lbas, nr_lbas = 0;
48         u64 i;
49
50         lba_list = emeta_to_lbas(pblk, emeta_buf);
51         if (!lba_list)
52                 return 1;
53
54         data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
55         data_end = line->emeta_ssec;
56         nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);
57
58         for (i = data_start; i < data_end; i++) {
59                 struct ppa_addr ppa;
60                 int pos;
61
62                 ppa = addr_to_gen_ppa(pblk, i, line->id);
63                 pos = pblk_ppa_to_pos(geo, ppa);
64
65                 /* Do not update bad blocks */
66                 if (test_bit(pos, line->blk_bitmap))
67                         continue;
68
69                 if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
70                         spin_lock(&line->lock);
71                         if (test_and_set_bit(i, line->invalid_bitmap))
72                                 WARN_ONCE(1, "pblk: rec. double invalidate:\n");
73                         else
74                                 le32_add_cpu(line->vsc, -1);
75                         spin_unlock(&line->lock);
76
77                         continue;
78                 }
79
80                 pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
81                 nr_lbas++;
82         }
83
84         if (nr_valid_lbas != nr_lbas)
85                 pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
86                                 line->id, nr_valid_lbas, nr_lbas);
87
88         line->left_msecs = 0;
89
90         return 0;
91 }
92
93 static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
94                                 u64 written_secs)
95 {
96         int i;
97
98         for (i = 0; i < written_secs; i += pblk->min_write_pgs)
99                 pblk_alloc_page(pblk, line, pblk->min_write_pgs);
100 }
101
102 static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
103 {
104         struct pblk_line_meta *lm = &pblk->lm;
105         int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
106         u64 written_secs = 0;
107         int valid_chunks = 0;
108         int i;
109
110         for (i = 0; i < lm->blk_per_line; i++) {
111                 struct nvm_chk_meta *chunk = &line->chks[i];
112
113                 if (chunk->state & NVM_CHK_ST_OFFLINE)
114                         continue;
115
116                 written_secs += chunk->wp;
117                 valid_chunks++;
118         }
119
120         if (lm->blk_per_line - nr_bb != valid_chunks)
121                 pblk_err(pblk, "recovery line %d is bad\n", line->id);
122
123         pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);
124
125         return written_secs;
126 }
127
/* Preallocated resources threaded through the OOB recovery path */
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;	/* device PPAs for each read */
	void *meta_list;		/* per-sector OOB metadata buffer */
	struct nvm_rq *rqd;		/* reusable request descriptor */
	void *data;			/* payload buffer for reads */
	dma_addr_t dma_ppa_list;	/* DMA handle for ppa_list */
	dma_addr_t dma_meta_list;	/* DMA handle for meta_list */
};
136
137 static void pblk_recov_complete(struct kref *ref)
138 {
139         struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);
140
141         complete(&pad_rq->wait);
142 }
143
144 static void pblk_end_io_recov(struct nvm_rq *rqd)
145 {
146         struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
147         struct pblk_pad_rq *pad_rq = rqd->private;
148         struct pblk *pblk = pad_rq->pblk;
149
150         pblk_up_chunk(pblk, ppa_list[0]);
151
152         pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
153
154         atomic_dec(&pblk->inflight_io);
155         kref_put(&pad_rq->ref, pblk_recov_complete);
156 }
157
158 /* pad line using line bitmap.  */
159 static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
160                                int left_ppas)
161 {
162         struct nvm_tgt_dev *dev = pblk->dev;
163         struct nvm_geo *geo = &dev->geo;
164         void *meta_list;
165         struct pblk_pad_rq *pad_rq;
166         struct nvm_rq *rqd;
167         struct bio *bio;
168         void *data;
169         __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
170         u64 w_ptr = line->cur_sec;
171         int left_line_ppas, rq_ppas, rq_len;
172         int i, j;
173         int ret = 0;
174
175         spin_lock(&line->lock);
176         left_line_ppas = line->left_msecs;
177         spin_unlock(&line->lock);
178
179         pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
180         if (!pad_rq)
181                 return -ENOMEM;
182
183         data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
184         if (!data) {
185                 ret = -ENOMEM;
186                 goto free_rq;
187         }
188
189         pad_rq->pblk = pblk;
190         init_completion(&pad_rq->wait);
191         kref_init(&pad_rq->ref);
192
193 next_pad_rq:
194         rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
195         if (rq_ppas < pblk->min_write_pgs) {
196                 pblk_err(pblk, "corrupted pad line %d\n", line->id);
197                 goto fail_free_pad;
198         }
199
200         rq_len = rq_ppas * geo->csecs;
201
202         bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
203                                                 PBLK_VMALLOC_META, GFP_KERNEL);
204         if (IS_ERR(bio)) {
205                 ret = PTR_ERR(bio);
206                 goto fail_free_pad;
207         }
208
209         bio->bi_iter.bi_sector = 0; /* internal bio */
210         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
211
212         rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
213
214         ret = pblk_alloc_rqd_meta(pblk, rqd);
215         if (ret)
216                 goto fail_free_rqd;
217
218         rqd->bio = bio;
219         rqd->opcode = NVM_OP_PWRITE;
220         rqd->is_seq = 1;
221         rqd->nr_ppas = rq_ppas;
222         rqd->end_io = pblk_end_io_recov;
223         rqd->private = pad_rq;
224
225         meta_list = rqd->meta_list;
226
227         for (i = 0; i < rqd->nr_ppas; ) {
228                 struct ppa_addr ppa;
229                 int pos;
230
231                 w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
232                 ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
233                 pos = pblk_ppa_to_pos(geo, ppa);
234
235                 while (test_bit(pos, line->blk_bitmap)) {
236                         w_ptr += pblk->min_write_pgs;
237                         ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
238                         pos = pblk_ppa_to_pos(geo, ppa);
239                 }
240
241                 for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
242                         struct ppa_addr dev_ppa;
243                         struct pblk_sec_meta *meta;
244                         __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
245
246                         dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
247
248                         pblk_map_invalidate(pblk, dev_ppa);
249                         lba_list[w_ptr] = addr_empty;
250                         meta = pblk_get_meta(pblk, meta_list, i);
251                         meta->lba = addr_empty;
252                         rqd->ppa_list[i] = dev_ppa;
253                 }
254         }
255
256         kref_get(&pad_rq->ref);
257         pblk_down_chunk(pblk, rqd->ppa_list[0]);
258
259         ret = pblk_submit_io(pblk, rqd);
260         if (ret) {
261                 pblk_err(pblk, "I/O submission failed: %d\n", ret);
262                 pblk_up_chunk(pblk, rqd->ppa_list[0]);
263                 goto fail_free_rqd;
264         }
265
266         left_line_ppas -= rq_ppas;
267         left_ppas -= rq_ppas;
268         if (left_ppas && left_line_ppas)
269                 goto next_pad_rq;
270
271         kref_put(&pad_rq->ref, pblk_recov_complete);
272
273         if (!wait_for_completion_io_timeout(&pad_rq->wait,
274                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
275                 pblk_err(pblk, "pad write timed out\n");
276                 ret = -ETIME;
277         }
278
279         if (!pblk_line_is_full(line))
280                 pblk_err(pblk, "corrupted padded line: %d\n", line->id);
281
282         vfree(data);
283 free_rq:
284         kfree(pad_rq);
285         return ret;
286
287 fail_free_rqd:
288         pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
289         bio_put(bio);
290 fail_free_pad:
291         kfree(pad_rq);
292         vfree(data);
293         return ret;
294 }
295
296 static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
297 {
298         struct nvm_tgt_dev *dev = pblk->dev;
299         struct nvm_geo *geo = &dev->geo;
300         int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;
301
302         return (distance > line->left_msecs) ? line->left_msecs : distance;
303 }
304
305 static int pblk_line_wp_is_unbalanced(struct pblk *pblk,
306                                       struct pblk_line *line)
307 {
308         struct nvm_tgt_dev *dev = pblk->dev;
309         struct nvm_geo *geo = &dev->geo;
310         struct pblk_line_meta *lm = &pblk->lm;
311         struct pblk_lun *rlun;
312         struct nvm_chk_meta *chunk;
313         struct ppa_addr ppa;
314         u64 line_wp;
315         int pos, i;
316
317         rlun = &pblk->luns[0];
318         ppa = rlun->bppa;
319         pos = pblk_ppa_to_pos(geo, ppa);
320         chunk = &line->chks[pos];
321
322         line_wp = chunk->wp;
323
324         for (i = 1; i < lm->blk_per_line; i++) {
325                 rlun = &pblk->luns[i];
326                 ppa = rlun->bppa;
327                 pos = pblk_ppa_to_pos(geo, ppa);
328                 chunk = &line->chks[pos];
329
330                 if (chunk->wp > line_wp)
331                         return 1;
332                 else if (chunk->wp < line_wp)
333                         line_wp = chunk->wp;
334         }
335
336         return 0;
337 }
338
/*
 * Rebuild the L2P table for one line by reading the out-of-band (OOB)
 * metadata of every written sector.  Used when the line's emeta is not
 * available (open line) or failed validation.
 *
 * Reads proceed in batches from the first data sector up to the device
 * write pointer; a failed read triggers a one-shot pad-and-retry of the
 * line.  Returns 0 on success or a negative errno.
 */
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list;
	/* first data sector: skip the smeta region at line start */
	u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	bool padded = false;
	int rq_ppas, rq_len;
	int i, j;
	int ret;
	u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

	if (pblk_line_wp_is_unbalanced(pblk, line))
		pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->csecs;

retry_rq:
	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	/* Extra reference: a bio_map_kern bio drops one reference from
	 * its end_io on I/O completion, so keep the bio alive until we
	 * are done with it here.
	 */
	bio_get(bio);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->is_seq = 1;

	/* Build the PPA list, skipping bad blocks at chunk granularity.
	 * NOTE(review): paddr is not advanced between min_write_pgs
	 * groups within one request (only the bad-block skip and the
	 * copy loop below move it) — verify against pblk_calc_secs that
	 * requests spanning multiple groups are handled as intended.
	 */
	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr + j, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		/* The bio never completed, so its end_io never ran:
		 * drop both references (ours and the completion's).
		 */
		bio_put(bio);
		bio_put(bio);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* If a read fails, do a best effort by padding the line and retrying */
	if (rqd->error) {
		int pad_distance, ret;

		if (padded) {
			/* Already padded once: give up on this line */
			pblk_log_read_err(pblk, rqd);
			bio_put(bio);
			return -EINTR;
		}

		pad_distance = pblk_pad_distance(pblk, line);
		ret = pblk_recov_pad_line(pblk, line, pad_distance);
		if (ret) {
			bio_put(bio);
			return ret;
		}

		padded = true;
		bio_put(bio);
		goto retry_rq;
	}

	pblk_get_packed_meta(pblk, rqd);
	bio_put(bio);

	/* Record each sector's LBA and rebuild its L2P mapping */
	for (i = 0; i < rqd->nr_ppas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		lba_list[paddr++] = cpu_to_le64(lba);

		/* Skip padded sectors and out-of-range LBAs.
		 * NOTE(review): '>' allows lba == nr_secs through — check
		 * whether this should be '>=' as in the read path.
		 */
		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		line->nr_valid_lbas++;
		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(padded && !pblk_line_is_full(line));
#endif

	return 0;
}
475
476 /* Scan line for lbas on out of bound area */
477 static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
478 {
479         struct nvm_tgt_dev *dev = pblk->dev;
480         struct nvm_geo *geo = &dev->geo;
481         struct nvm_rq *rqd;
482         struct ppa_addr *ppa_list;
483         void *meta_list;
484         struct pblk_recov_alloc p;
485         void *data;
486         dma_addr_t dma_ppa_list, dma_meta_list;
487         int ret = 0;
488
489         meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
490         if (!meta_list)
491                 return -ENOMEM;
492
493         ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
494         dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
495
496         data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
497         if (!data) {
498                 ret = -ENOMEM;
499                 goto free_meta_list;
500         }
501
502         rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
503         memset(rqd, 0, pblk_g_rq_size);
504
505         p.ppa_list = ppa_list;
506         p.meta_list = meta_list;
507         p.rqd = rqd;
508         p.data = data;
509         p.dma_ppa_list = dma_ppa_list;
510         p.dma_meta_list = dma_meta_list;
511
512         ret = pblk_recov_scan_oob(pblk, line, p);
513         if (ret) {
514                 pblk_err(pblk, "could not recover L2P form OOB\n");
515                 goto out;
516         }
517
518         if (pblk_line_is_full(line))
519                 pblk_line_recov_close(pblk, line);
520
521 out:
522         mempool_free(rqd, &pblk->r_rq_pool);
523         kfree(data);
524 free_meta_list:
525         nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
526
527         return ret;
528 }
529
530 /* Insert lines ordered by sequence number (seq_num) on list */
531 static void pblk_recov_line_add_ordered(struct list_head *head,
532                                         struct pblk_line *line)
533 {
534         struct pblk_line *t = NULL;
535
536         list_for_each_entry(t, head, list)
537                 if (t->seq_nr > line->seq_nr)
538                         break;
539
540         __list_add(&line->list, t->list.prev, &t->list);
541 }
542
543 static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
544 {
545         struct nvm_tgt_dev *dev = pblk->dev;
546         struct nvm_geo *geo = &dev->geo;
547         struct pblk_line_meta *lm = &pblk->lm;
548         unsigned int emeta_secs;
549         u64 emeta_start;
550         struct ppa_addr ppa;
551         int pos;
552
553         emeta_secs = lm->emeta_sec[0];
554         emeta_start = lm->sec_per_line;
555
556         while (emeta_secs) {
557                 emeta_start--;
558                 ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
559                 pos = pblk_ppa_to_pos(geo, ppa);
560                 if (!test_bit(pos, line->blk_bitmap))
561                         emeta_secs--;
562         }
563
564         return emeta_start;
565 }
566
567 static int pblk_recov_check_line_version(struct pblk *pblk,
568                                          struct line_emeta *emeta)
569 {
570         struct line_header *header = &emeta->header;
571
572         if (header->version_major != EMETA_VERSION_MAJOR) {
573                 pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
574                          header->version_major, EMETA_VERSION_MAJOR);
575                 return 1;
576         }
577
578 #ifdef CONFIG_NVM_PBLK_DEBUG
579         if (header->version_minor > EMETA_VERSION_MINOR)
580                 pblk_info(pblk, "newer line minor version found: %d\n",
581                                 header->version_minor);
582 #endif
583
584         return 0;
585 }
586
587 static void pblk_recov_wa_counters(struct pblk *pblk,
588                                    struct line_emeta *emeta)
589 {
590         struct pblk_line_meta *lm = &pblk->lm;
591         struct line_header *header = &emeta->header;
592         struct wa_counters *wa = emeta_to_wa(lm, emeta);
593
594         /* WA counters were introduced in emeta version 0.2 */
595         if (header->version_major > 0 || header->version_minor >= 2) {
596                 u64 user = le64_to_cpu(wa->user);
597                 u64 pad = le64_to_cpu(wa->pad);
598                 u64 gc = le64_to_cpu(wa->gc);
599
600                 atomic64_set(&pblk->user_wa, user);
601                 atomic64_set(&pblk->pad_wa, pad);
602                 atomic64_set(&pblk->gc_wa, gc);
603
604                 pblk->user_rst_wa = user;
605                 pblk->pad_rst_wa = pad;
606                 pblk->gc_rst_wa = gc;
607         }
608 }
609
610 static int pblk_line_was_written(struct pblk_line *line,
611                                  struct pblk *pblk)
612 {
613
614         struct pblk_line_meta *lm = &pblk->lm;
615         struct nvm_tgt_dev *dev = pblk->dev;
616         struct nvm_geo *geo = &dev->geo;
617         struct nvm_chk_meta *chunk;
618         struct ppa_addr bppa;
619         int smeta_blk;
620
621         if (line->state == PBLK_LINESTATE_BAD)
622                 return 0;
623
624         smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
625         if (smeta_blk >= lm->blk_per_line)
626                 return 0;
627
628         bppa = pblk->luns[smeta_blk].bppa;
629         chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
630
631         if (chunk->state & NVM_CHK_ST_FREE)
632                 return 0;
633
634         return 1;
635 }
636
637 static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
638 {
639         struct pblk_line_meta *lm = &pblk->lm;
640         int i;
641
642         for (i = 0; i < lm->blk_per_line; i++)
643                 if (line->chks[i].state & NVM_CHK_ST_OPEN)
644                         return true;
645
646         return false;
647 }
648
/*
 * Scan-based L2P recovery after an unclean shutdown: validate every
 * line's smeta, order written lines by sequence number, then rebuild
 * the L2P table line by line (from emeta when valid, from OOB
 * metadata otherwise).
 *
 * Returns the open data line to continue writing on, NULL when no line
 * was left open, or an ERR_PTR on incompatible on-media versions.
 */
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		if (!pblk_line_was_written(line, pblk))
			continue;

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_smeta_read(pblk, line))
			continue;

		/* Skip lines whose smeta fails CRC or magic validation */
		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		/* NOTE(review): this early return leaves the meta_bitmap
		 * bit set and already-queued recov_list lines unprocessed;
		 * verify callers treat it as a fatal init failure.
		 */
		if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
			pblk_err(pblk, "found incompatible line version %u\n",
					smeta_buf->header.version_major);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		/* Fresh instance: generate a new uuid and release the
		 * metadata line reserved above.
		 */
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table*/
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		recovered_lines++;

		line->emeta_ssec = pblk_line_emeta_start(pblk, line);
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		/* Open lines carry no emeta: recover from per-sector OOB */
		if (pblk_line_is_open(pblk, line)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		/* Fall back to the OOB scan when emeta is unreadable... */
		if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		/* ...or fails CRC/magic validation */
		if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_line_version(pblk, line->emeta->buf))
			return ERR_PTR(-EINVAL);

		pblk_recov_wa_counters(pblk, line->emeta->buf);

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			/* Fully written line: close it and queue for GC */
			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, l_mg->bitmap_pool);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			/* Partially written line: reopen it for new writes */
			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_OPEN;
			spin_unlock(&line->lock);

			line->emeta->mem = 0;
			atomic_set(&line->emeta->sync, 0);

			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

			data_line = line;
			line->meta_line = meta_line;

			open_lines++;
		}
	}

	if (!open_lines) {
		/* No line left open: give back the reserved metadata line */
		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);
	} else {
		spin_lock(&l_mg->free_lock);
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
		spin_unlock(&l_mg->free_lock);
	}

	if (is_next)
		pblk_line_erase(pblk, l_mg->data_next);

out:
	if (found_lines != recovered_lines)
		pblk_err(pblk, "failed to recover all found lines %d/%d\n",
						found_lines, recovered_lines);

	return data_line;
}
846
847 /*
848  * Pad current line
849  */
850 int pblk_recov_pad(struct pblk *pblk)
851 {
852         struct pblk_line *line;
853         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
854         int left_msecs;
855         int ret = 0;
856
857         spin_lock(&l_mg->free_lock);
858         line = l_mg->data_line;
859         left_msecs = line->left_msecs;
860         spin_unlock(&l_mg->free_lock);
861
862         ret = pblk_recov_pad_line(pblk, line, left_msecs);
863         if (ret) {
864                 pblk_err(pblk, "tear down padding failed (%d)\n", ret);
865                 return ret;
866         }
867
868         pblk_line_close_meta(pblk, line);
869         return ret;
870 }