// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single threaded as the L2P table is updated in order
 * following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"

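/*
 * pblk_recov_check_emeta - validate a line's end metadata (emeta).
 *
 * Returns 1 if the CRC or the PBLK_MAGIC identifier does not match,
 * 0 if the buffer is consistent.
 */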
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return 1;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return 1;

        return 0;
}

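/*
 * pblk_recov_l2p_from_emeta - rebuild the line's portion of the L2P table
 * from the lba list stored in emeta. Walks every data sector in the line,
 * skipping bad blocks, invalidating empty entries and mapping the rest.
 * Returns 1 if the lba list cannot be located, 0 otherwise.
 */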
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        u64 data_start, data_end;
        u64 nr_valid_lbas, nr_lbas = 0;
        u64 i;

        lba_list = emeta_to_lbas(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        data_end = line->emeta_ssec;
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < data_end; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate:\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pblk_err(pblk, "line %d - inconsistent lba list (%llu/%llu)\n",
                                line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}

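/*
 * pblk_update_line_wp - advance the line's write context by written_secs,
 * allocating pages in min_write_pgs increments so the in-memory write
 * pointer matches what is already on the media.
 */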
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
                                u64 written_secs)
{
        int i;

        for (i = 0; i < written_secs; i += pblk->min_write_pgs)
                pblk_alloc_page(pblk, line, pblk->min_write_pgs);
}

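/*
 * pblk_sec_in_open_line - count the sectors written to an open line by
 * summing the write pointers of its online chunks, and sanity check the
 * number of usable chunks against the bad block bitmap. Also advances
 * the line's write pointer past the sectors already on the media.
 */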
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        u64 written_secs = 0;
        int valid_chunks = 0;
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct nvm_chk_meta *chunk = &line->chks[i];

                if (chunk->state & NVM_CHK_ST_OFFLINE)
                        continue;

                written_secs += chunk->wp;
                valid_chunks++;
        }

        if (lm->blk_per_line - nr_bb != valid_chunks)
                pblk_err(pblk, "recovery line %d is bad\n", line->id);

        pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

        return written_secs;
}

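/*
 * Preallocated resources (request, data buffer and DMA-mapped metadata)
 * passed around and reused for every recovery read on a line.
 */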
struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};

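/* Called on the last kref put: wake up the waiter for the padding I/Os */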
static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}

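/*
 * Completion path for one padding write: release the chunk semaphore,
 * free the request and drop its reference on the shared pad request.
 */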
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;

        pblk_up_chunk(pblk, ppa_list[0]);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
}

/* Pad the line with empty sectors, using the line bitmap to skip bad blocks */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
                               int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        void *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (rq_ppas < pblk->min_write_pgs) {
                pblk_err(pblk, "corrupted pad line %d\n", line->id);
                ret = -EINVAL;
                goto fail_free_pad;
        }

        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_pad;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        ret = pblk_alloc_rqd_meta(pblk, rqd);
        if (ret)
                goto fail_free_rqd;

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
        rqd->nr_ppas = rq_ppas;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        meta_list = rqd->meta_list;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        struct pblk_sec_meta *meta;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = addr_empty;
                        meta = pblk_get_meta(pblk, meta_list, i);
                        meta->lba = addr_empty;
                        rqd->ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);
        pblk_down_chunk(pblk, rqd->ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, rqd->ppa_list[0]);
                goto fail_free_rqd;
        }

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

        kref_put(&pad_rq->ref, pblk_recov_complete);

        if (!wait_for_completion_io_timeout(&pad_rq->wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pblk_err(pblk, "pad write timed out\n");
                ret = -ETIME;
        }

        if (!pblk_line_is_full(line))
                pblk_err(pblk, "corrupted padded line: %d\n", line->id);

        vfree(data);
free_rq:
        kfree(pad_rq);
        return ret;

fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        bio_put(bio);
fail_free_pad:
        kfree(pad_rq);
        vfree(data);
        return ret;
}

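/*
 * pblk_pad_distance - number of sectors to pad after a failed read:
 * mw_cunits * all_luns * ws_opt, capped at the sectors left in the line,
 * which should be enough to force cached device data out to the media.
 */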
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

        return (distance > line->left_msecs) ? line->left_msecs : distance;
}

/* Return a chunk belonging to a line by stripe (write order) index */
static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
                                                  struct pblk_line *line,
                                                  int index)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        struct ppa_addr ppa;
        int pos;

        rlun = &pblk->luns[index];
        ppa = rlun->bppa;
        pos = pblk_ppa_to_pos(geo, ppa);

        return &line->chks[pos];
}

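/*
 * Check whether any good chunk's write pointer strays by more than one
 * maximum write unit (max_write_pgs) from the first good chunk's write
 * pointer, which would indicate an interrupted, unbalanced stripe.
 */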
static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
                                      struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = lm->blk_per_line;
        struct nvm_chk_meta *chunk;
        u64 max_wp, min_wp;
        int i;

        i = find_first_zero_bit(line->blk_bitmap, blk_in_line);

        /* If there are one or zero good chunks in the line,
         * the write pointers can't be unbalanced.
         */
        if (i >= (blk_in_line - 1))
                return 0;

        chunk = pblk_get_stripe_chunk(pblk, line, i);
        max_wp = chunk->wp;
        if (max_wp > pblk->max_write_pgs)
                min_wp = max_wp - pblk->max_write_pgs;
        else
                min_wp = 0;

        i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
        while (i < blk_in_line) {
                chunk = pblk_get_stripe_chunk(pblk, line, i);
                if (chunk->wp > max_wp || chunk->wp < min_wp)
                        return 1;

                i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
        }

        return 0;
}

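/*
 * pblk_recov_scan_oob - rebuild the L2P table for a line by reading every
 * written sector and extracting the LBA from its out-of-band metadata.
 * If a read fails, the line is padded once and the read is retried
 * before giving up.
 */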
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list;
        u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        bool padded = false;
        int rq_ppas, rq_len;
        int i, j;
        int ret;
        u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

        if (pblk_line_wps_are_unbalanced(pblk, line))
                pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->csecs;

retry_rq:
        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        bio_get(bio);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->is_seq = 1;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr + j, line->id);
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                bio_put(bio);
                return ret;
        }

        atomic_dec(&pblk->inflight_io);

        /* If a read fails, make a best effort by padding the line and retrying */
        if (rqd->error) {
                int pad_distance, ret;

                if (padded) {
                        pblk_log_read_err(pblk, rqd);
                        bio_put(bio);
                        return -EINTR;
                }

                pad_distance = pblk_pad_distance(pblk, line);
                ret = pblk_recov_pad_line(pblk, line, pad_distance);
                if (ret) {
                        bio_put(bio);
                        return ret;
                }

                padded = true;
                bio_put(bio);
                goto retry_rq;
        }

        pblk_get_packed_meta(pblk, rqd);
        bio_put(bio);

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                u64 lba = le64_to_cpu(meta->lba);

                lba_list[paddr++] = cpu_to_le64(lba);

                if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                        continue;

                line->nr_valid_lbas++;
                pblk_update_map(pblk, lba, rqd->ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(padded && !pblk_line_is_full(line));
#endif

        return 0;
}

/* Scan the line for LBAs stored in the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int ret = 0;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

        data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p);
        if (ret) {
                pblk_err(pblk, "could not recover L2P from OOB\n");
                goto out;
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        mempool_free(rqd, &pblk->r_rq_pool);
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

        return ret;
}

/* Insert lines into the list ordered by sequence number (seq_nr) */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}

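/*
 * pblk_line_emeta_start - compute the first sector of the emeta region by
 * walking backwards from the end of the line, counting only sectors that
 * do not fall on bad blocks.
 */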
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int emeta_secs;
        u64 emeta_start;
        struct ppa_addr ppa;
        int pos;

        emeta_secs = lm->emeta_sec[0];
        emeta_start = lm->sec_per_line;

        while (emeta_secs) {
                emeta_start--;
                ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);
                if (!test_bit(pos, line->blk_bitmap))
                        emeta_secs--;
        }

        return emeta_start;
}

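/*
 * Reject lines whose emeta major version does not match the driver;
 * a newer minor version is tolerated (and logged in debug builds).
 */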
static int pblk_recov_check_line_version(struct pblk *pblk,
                                         struct line_emeta *emeta)
{
        struct line_header *header = &emeta->header;

        if (header->version_major != EMETA_VERSION_MAJOR) {
                pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
                         header->version_major, EMETA_VERSION_MAJOR);
                return 1;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (header->version_minor > EMETA_VERSION_MINOR)
                pblk_info(pblk, "newer line minor version found: %d\n",
                                header->version_minor);
#endif

        return 0;
}

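/*
 * Restore the write amplification counters (user, pad and gc writes)
 * from emeta so the statistics survive a restart.
 */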
static void pblk_recov_wa_counters(struct pblk *pblk,
                                   struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct line_header *header = &emeta->header;
        struct wa_counters *wa = emeta_to_wa(lm, emeta);

        /* WA counters were introduced in emeta version 0.2 */
        if (header->version_major > 0 || header->version_minor >= 2) {
                u64 user = le64_to_cpu(wa->user);
                u64 pad = le64_to_cpu(wa->pad);
                u64 gc = le64_to_cpu(wa->gc);

                atomic64_set(&pblk->user_wa, user);
                atomic64_set(&pblk->pad_wa, pad);
                atomic64_set(&pblk->gc_wa, gc);

                pblk->user_rst_wa = user;
                pblk->pad_rst_wa = pad;
                pblk->gc_rst_wa = gc;
        }
}

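/*
 * A line counts as written if it is not marked bad and the chunk holding
 * its smeta (the first good block) is no longer in the free state. Lines
 * without any good blocks count as unwritten.
 */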
static int pblk_line_was_written(struct pblk_line *line,
                                 struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct ppa_addr bppa;
        int smeta_blk;

        if (line->state == PBLK_LINESTATE_BAD)
                return 0;

        smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (smeta_blk >= lm->blk_per_line)
                return 0;

        bppa = pblk->luns[smeta_blk].bppa;
        chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

        if (chunk->state & NVM_CHK_ST_FREE)
                return 0;

        return 1;
}

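/* A line is open if at least one of its chunks is in the open state */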
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        for (i = 0; i < lm->blk_per_line; i++)
                if (line->chks[i].state & NVM_CHK_ST_OPEN)
                        return true;

        return false;
}

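/*
 * pblk_recov_l2p - scan-based L2P recovery.
 *
 * Read and validate smeta on every line to find written lines, order them
 * by sequence number, then recover each line's portion of the L2P table,
 * preferring emeta for closed lines and falling back to the per-sector
 * OOB area for open or corrupted lines. Returns the last open data line
 * (to resume writing on), NULL if none, or an ERR_PTR on a fatal version
 * mismatch.
 */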
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                if (!pblk_line_was_written(line, pblk))
                        continue;

                /* Lines that cannot be read are assumed not to have been written */
                if (pblk_line_smeta_read(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
                        pblk_err(pblk, "found incompatible line version %u\n",
                                        smeta_buf->header.version_major);
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        guid_copy(&pblk->instance_uuid,
                                  (guid_t *)&smeta_buf->header.uuid);
                        valid_uuid = 1;
                }

                if (!guid_equal(&pblk->instance_uuid,
                                (guid_t *)&smeta_buf->header.uuid)) {
                        pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
                                        i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
                                                line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                guid_gen(&pblk->instance_uuid);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of the L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                recovered_lines++;

                line->emeta_ssec = pblk_line_emeta_start(pblk, line);
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_is_open(pblk, line)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_line_version(pblk, line->emeta->buf))
                        return ERR_PTR(-EINVAL);

                pblk_recov_wa_counters(pblk, line->emeta->buf);

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_OPEN;
                        spin_unlock(&line->lock);

                        line->emeta->mem = 0;
                        atomic_set(&line->emeta->sync, 0);

                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                        data_line = line;
                        line->meta_line = meta_line;

                        open_lines++;
                }
        }

        if (!open_lines) {
                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);
        } else {
                spin_lock(&l_mg->free_lock);
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
                spin_unlock(&l_mg->free_lock);
        }

        if (is_next)
                pblk_line_erase(pblk, l_mg->data_next);

out:
        if (found_lines != recovered_lines)
                pblk_err(pblk, "failed to recover all found lines %d/%d\n",
                                                found_lines, recovered_lines);

        return data_line;
}

/*
 * Pad the current data line to its end with empty sectors on tear down
 */
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_line(pblk, line, left_msecs);
        if (ret) {
                pblk_err(pblk, "tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}