// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single threaded as the L2P table is updated in order
 * following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"

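/*
 * Validate a line's end metadata (emeta): both the CRC stored in the
 * buffer and the pblk magic identifier must match. Returns 0 if the
 * emeta can be trusted, 1 otherwise.
 */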
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return 1;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return 1;

        return 0;
}

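/*
 * Rebuild the L2P table for a closed line from the lba list stored in
 * its emeta: sectors recorded as ADDR_EMPTY are invalidated, all others
 * are mapped. Returns 1 if the lba list is unavailable, 0 otherwise.
 */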
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        u64 data_start, data_end;
        u64 nr_valid_lbas, nr_lbas = 0;
        u64 i;

        lba_list = emeta_to_lbas(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        data_end = line->emeta_ssec;
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < data_end; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate:\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pblk_err(pblk, "line %d - inconsistent lba list (%llu/%llu)\n",
                                line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}

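/*
 * Advance the line's write pointer to account for the sectors found
 * written on the device, and adjust the remaining mappable sector
 * count (left_msecs) accordingly.
 */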
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
                                u64 written_secs)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        for (i = 0; i < written_secs; i += pblk->min_write_pgs)
                __pblk_alloc_page(pblk, line, pblk->min_write_pgs);

        spin_lock(&l_mg->free_lock);
        if (written_secs > line->left_msecs) {
                /*
                 * We have all data sectors written
                 * and some emeta sectors written too.
                 */
                line->left_msecs = 0;
        } else {
                /* We have only some data sectors written. */
                line->left_msecs -= written_secs;
        }
        spin_unlock(&l_mg->free_lock);
}

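/*
 * Count the sectors written to an open line by summing the write
 * pointers of all its online chunks, then fold that count into the
 * line's write pointer state.
 */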
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        u64 written_secs = 0;
        int valid_chunks = 0;
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct nvm_chk_meta *chunk = &line->chks[i];

                if (chunk->state & NVM_CHK_ST_OFFLINE)
                        continue;

                written_secs += chunk->wp;
                valid_chunks++;
        }

        if (lm->blk_per_line - nr_bb != valid_chunks)
                pblk_err(pblk, "recovery line %d is bad\n", line->id);

        pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

        return written_secs;
}

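/* Preallocated resources shared by the OOB recovery read path */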
struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};

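/* Called when the last padding reference is dropped: wake the waiter */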
static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}

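/*
 * Completion path for a padding write: release the chunk semaphore,
 * free the request and drop one reference on the pad request.
 */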
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;

        pblk_up_chunk(pblk, ppa_list[0]);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
}

/*
 * Pad a line with empty sectors, using the line bitmap to skip bad
 * blocks.
 */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
                               int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        void *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        struct ppa_addr *ppa_list;
        void *data;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (rq_ppas < pblk->min_write_pgs) {
                pblk_err(pblk, "corrupted pad line %d\n", line->id);
                goto fail_complete;
        }

        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_complete;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        ret = pblk_alloc_rqd_meta(pblk, rqd);
        if (ret) {
                pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
                bio_put(bio);
                goto fail_complete;
        }

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
        rqd->nr_ppas = rq_ppas;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        ppa_list = nvm_rq_to_ppa_list(rqd);
        meta_list = rqd->meta_list;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        struct pblk_sec_meta *meta;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = addr_empty;
                        meta = pblk_get_meta(pblk, meta_list, i);
                        meta->lba = addr_empty;
                        ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);
        pblk_down_chunk(pblk, ppa_list[0]);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, ppa_list[0]);
                kref_put(&pad_rq->ref, pblk_recov_complete);
                pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
                bio_put(bio);
                goto fail_complete;
        }

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

fail_complete:
        kref_put(&pad_rq->ref, pblk_recov_complete);
        wait_for_completion(&pad_rq->wait);

        if (!pblk_line_is_full(line))
                pblk_err(pblk, "corrupted padded line: %d\n", line->id);

        vfree(data);
free_rq:
        kfree(pad_rq);
        return ret;
}

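/*
 * Number of sectors to pad: enough to cover the device's cached write
 * window (mw_cunits on every LUN), capped at what is left in the line.
 */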
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

        return (distance > line->left_msecs) ? line->left_msecs : distance;
}

/* Return a chunk belonging to a line by stripe (write order) index */
static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
                                                  struct pblk_line *line,
                                                  int index)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        struct ppa_addr ppa;
        int pos;

        rlun = &pblk->luns[index];
        ppa = rlun->bppa;
        pos = pblk_ppa_to_pos(geo, ppa);

        return &line->chks[pos];
}

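/*
 * Check whether the chunks of a line have write pointers that differ
 * from the first good chunk's by more than one maximum-sized write,
 * which indicates an interrupted write across the stripe.
 */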
static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
                                      struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = lm->blk_per_line;
        struct nvm_chk_meta *chunk;
        u64 max_wp, min_wp;
        int i;

        i = find_first_zero_bit(line->blk_bitmap, blk_in_line);

        /* If there are one or zero good chunks in the line,
         * the write pointers can't be unbalanced.
         */
        if (i >= (blk_in_line - 1))
                return 0;

        chunk = pblk_get_stripe_chunk(pblk, line, i);
        max_wp = chunk->wp;
        if (max_wp > pblk->max_write_pgs)
                min_wp = max_wp - pblk->max_write_pgs;
        else
                min_wp = 0;

        i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
        while (i < blk_in_line) {
                chunk = pblk_get_stripe_chunk(pblk, line, i);
                if (chunk->wp > max_wp || chunk->wp < min_wp)
                        return 1;

                i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
        }

        return 0;
}

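/*
 * Replay an open line: read it back in device-sized chunks and rebuild
 * the L2P table from the lba carried in each sector's out-of-band
 * metadata. On a read failure the line is padded once and the request
 * retried.
 */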
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list;
        u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        bool padded = false;
        int rq_ppas, rq_len;
        int i, j;
        int ret;
        u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

        if (pblk_line_wps_are_unbalanced(pblk, line))
                pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->csecs;

retry_rq:
        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        bio_get(bio);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;
        ppa_list = nvm_rq_to_ppa_list(rqd);

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->is_seq = 1;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++)
                        ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr + j, line->id);
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                bio_put(bio);
                return ret;
        }

        atomic_dec(&pblk->inflight_io);

        /* If a read fails, do a best effort by padding the line and retrying */
        if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
                int pad_distance, ret;

                if (padded) {
                        pblk_log_read_err(pblk, rqd);
                        bio_put(bio);
                        return -EINTR;
                }

                pad_distance = pblk_pad_distance(pblk, line);
                ret = pblk_recov_pad_line(pblk, line, pad_distance);
                if (ret) {
                        bio_put(bio);
                        return ret;
                }

                padded = true;
                bio_put(bio);
                goto retry_rq;
        }

        pblk_get_packed_meta(pblk, rqd);
        bio_put(bio);

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                u64 lba = le64_to_cpu(meta->lba);

                lba_list[paddr++] = cpu_to_le64(lba);

                if (lba == ADDR_EMPTY || lba >= pblk->capacity)
                        continue;

                line->nr_valid_lbas++;
                pblk_update_map(pblk, lba, ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(padded && !pblk_line_is_full(line));
#endif

        return 0;
}

/* Scan a line for LBAs stored in the out-of-band (OOB) metadata area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        void *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int ret = 0;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

        data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p);
        if (ret) {
                pblk_err(pblk, "could not recover L2P from OOB\n");
                goto out;
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        mempool_free(rqd, &pblk->r_rq_pool);
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

        return ret;
}

/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}

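/*
 * Walk backwards from the end of the line, skipping bad blocks, to
 * locate the first sector occupied by the end metadata (emeta).
 */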
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int emeta_secs;
        u64 emeta_start;
        struct ppa_addr ppa;
        int pos;

        emeta_secs = lm->emeta_sec[0];
        emeta_start = lm->sec_per_line;

        while (emeta_secs) {
                emeta_start--;
                ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);
                if (!test_bit(pos, line->blk_bitmap))
                        emeta_secs--;
        }

        return emeta_start;
}

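/*
 * Reject lines written with an incompatible emeta major version; newer
 * minor versions are tolerated (and reported in debug builds).
 */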
static int pblk_recov_check_line_version(struct pblk *pblk,
                                         struct line_emeta *emeta)
{
        struct line_header *header = &emeta->header;

        if (header->version_major != EMETA_VERSION_MAJOR) {
                pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
                         header->version_major, EMETA_VERSION_MAJOR);
                return 1;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (header->version_minor > EMETA_VERSION_MINOR)
                pblk_info(pblk, "newer line minor version found: %d\n",
                                header->version_minor);
#endif

        return 0;
}

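/*
 * Restore the persisted write amplification counters (user, pad and GC
 * writes) from emeta, provided the emeta version carries them.
 */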
static void pblk_recov_wa_counters(struct pblk *pblk,
                                   struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct line_header *header = &emeta->header;
        struct wa_counters *wa = emeta_to_wa(lm, emeta);

        /* WA counters were introduced in emeta version 0.2 */
        if (header->version_major > 0 || header->version_minor >= 2) {
                u64 user = le64_to_cpu(wa->user);
                u64 pad = le64_to_cpu(wa->pad);
                u64 gc = le64_to_cpu(wa->gc);

                atomic64_set(&pblk->user_wa, user);
                atomic64_set(&pblk->pad_wa, pad);
                atomic64_set(&pblk->gc_wa, gc);

                pblk->user_rst_wa = user;
                pblk->pad_rst_wa = pad;
                pblk->gc_rst_wa = gc;
        }
}

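/*
 * A line holds data if it is not bad and the chunk holding its start
 * metadata (smeta) is either closed, or open with a write pointer that
 * has advanced past the smeta sectors.
 */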
static int pblk_line_was_written(struct pblk_line *line,
                                 struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct ppa_addr bppa;
        int smeta_blk;

        if (line->state == PBLK_LINESTATE_BAD)
                return 0;

        smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (smeta_blk >= lm->blk_per_line)
                return 0;

        bppa = pblk->luns[smeta_blk].bppa;
        chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

        if (chunk->state & NVM_CHK_ST_CLOSED ||
            (chunk->state & NVM_CHK_ST_OPEN
             && chunk->wp >= lm->smeta_sec))
                return 1;

        return 0;
}

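/* A line is open if any of its chunks is in the open state */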
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        for (i = 0; i < lm->blk_per_line; i++)
                if (line->chks[i].state & NVM_CHK_ST_OPEN)
                        return true;

        return false;
}

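/*
 * Scan-based L2P recovery: read smeta from every line to identify
 * valid lines belonging to this instance, order them by sequence
 * number, and rebuild the L2P table line by line - from emeta for
 * closed lines, or from the OOB metadata for open or corrupted ones.
 * Returns the open data line to resume writing on (NULL if none), or
 * an ERR_PTR on fatal incompatibilities.
 */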
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                if (!pblk_line_was_written(line, pblk))
                        continue;

                /* Lines that cannot be read are assumed not to have been written */
                if (pblk_line_smeta_read(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
                        pblk_err(pblk, "found incompatible line version %u\n",
                                        smeta_buf->header.version_major);
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        guid_copy(&pblk->instance_uuid,
                                  (guid_t *)&smeta_buf->header.uuid);
                        valid_uuid = 1;
                }

                if (!guid_equal(&pblk->instance_uuid,
                                (guid_t *)&smeta_buf->header.uuid)) {
                        pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
                                        i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
                                                line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                guid_gen(&pblk->instance_uuid);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of the L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                recovered_lines++;

                line->emeta_ssec = pblk_line_emeta_start(pblk, line);
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_is_open(pblk, line)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_check_line_version(pblk, line->emeta->buf))
                        return ERR_PTR(-EINVAL);

                pblk_recov_wa_counters(pblk, line->emeta->buf);

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_OPEN;
                        spin_unlock(&line->lock);

                        line->emeta->mem = 0;
                        atomic_set(&line->emeta->sync, 0);

                        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                        data_line = line;
                        line->meta_line = meta_line;

                        open_lines++;
                }
        }

        if (!open_lines) {
                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);
        } else {
                spin_lock(&l_mg->free_lock);
                l_mg->data_line = data_line;
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
                spin_unlock(&l_mg->free_lock);
        }

        if (is_next)
                pblk_line_erase(pblk, l_mg->data_next);

out:
        if (found_lines != recovered_lines)
                pblk_err(pblk, "failed to recover all found lines %d/%d\n",
                                                found_lines, recovered_lines);

        return data_line;
}

/*
 * Pad the current data line so that it can be closed cleanly on tear down.
 */
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_line(pblk, line, left_msecs);
        if (ret) {
                pblk_err(pblk, "tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}