/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
                                *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
                          struct bio *bio)
{
        int ret;

        /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
         * constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
                blk_queue_split(q, &bio);
                ret = pblk_submit_read(pblk, bio);
                if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
                        bio_put(bio);

                return ret;
        }

        /* Prevent deadlock in the case of a modest LUN configuration and large
         * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
         * available for user I/O.
         */
        if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                blk_queue_split(q, &bio);

        return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                pblk_discard(pblk, bio);
                if (!(bio->bi_opf & REQ_PREFLUSH)) {
                        bio_endio(bio);
                        return BLK_QC_T_NONE;
                }
        }

        switch (pblk_rw_io(q, pblk, bio)) {
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        }

        return BLK_QC_T_NONE;
}

static size_t pblk_trans_map_size(struct pblk *pblk)
{
        int entry_size = 8;

        if (pblk->addrf_len < 32)
                entry_size = 4;

        return entry_size * pblk->rl.nr_secs;
}
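
/*
 * The L2P table stores one packed ppa_addr per logical sector: 4-byte
 * entries when the device address format fits in fewer than 32 bits,
 * 8-byte entries otherwise. As a worked example, an instance exposing
 * 64M sectors with a sub-32-bit format needs 64M * 4B = 256MB of table
 * instead of 512MB.
 */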

#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
        size_t map_size;
        u32 crc = ~(u32)0;

        map_size = pblk_trans_map_size(pblk);
        crc = crc32_le(crc, pblk->trans_map, map_size);
        return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
        vfree(pblk->trans_map);
}

static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
        struct pblk_line *line = NULL;

        if (factory_init) {
                pblk_setup_uuid(pblk);
        } else {
                line = pblk_recov_l2p(pblk);
                if (IS_ERR(line)) {
                        pr_err("pblk: could not recover l2p table\n");
                        return -EFAULT;
                }
        }

#ifdef CONFIG_NVM_DEBUG
        pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

        /* Free full lines directly as GC has not been started yet */
        pblk_gc_free_full_lines(pblk);

        if (!line) {
                /* Configure next line for user data */
                line = pblk_line_get_first_data(pblk);
                if (!line)
                        return -EFAULT;
        }

        return 0;
}

static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
        sector_t i;
        struct ppa_addr ppa;
        size_t map_size;
        int ret = 0;

        map_size = pblk_trans_map_size(pblk);
        pblk->trans_map = vmalloc(map_size);
        if (!pblk->trans_map)
                return -ENOMEM;

        pblk_ppa_set_empty(&ppa);

        for (i = 0; i < pblk->rl.nr_secs; i++)
                pblk_trans_map_set(pblk, i, ppa);

        ret = pblk_l2p_recover(pblk, factory_init);
        if (ret)
                vfree(pblk->trans_map);

        return ret;
}

static void pblk_rwb_free(struct pblk *pblk)
{
        if (pblk_rb_tear_down_check(&pblk->rwb))
                pr_err("pblk: write buffer error on tear down\n");

        pblk_rb_data_free(&pblk->rwb);
        vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_rb_entry *entries;
        unsigned long nr_entries, buffer_size;
        unsigned int power_size, power_seg_sz;

        if (write_buffer_size && (write_buffer_size > pblk->pgs_in_buffer))
                buffer_size = write_buffer_size;
        else
                buffer_size = pblk->pgs_in_buffer;

        nr_entries = pblk_rb_calculate_size(buffer_size);

        entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
        if (!entries)
                return -ENOMEM;

        power_size = get_count_order(nr_entries);
        power_seg_sz = get_count_order(geo->csecs);

        return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}
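
/*
 * The write buffer must hold at least pgs_in_buffer entries (mw_cunits
 * sectors per LUN, across all LUNs). pblk_rb_calculate_size() is expected
 * to round this up to a power of two, matching the get_count_order()
 * calls above, so that ring-buffer producer/consumer arithmetic can use
 * bit masks; e.g. a request for 3000 entries would yield 4096.
 */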

/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
{
        struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
        int power_len;

        /* Re-calculate channel and lun format to adapt to configuration */
        power_len = get_count_order(geo->num_ch);
        if (1 << power_len != geo->num_ch) {
                pr_err("pblk: supports only power-of-two channel config.\n");
                return -EINVAL;
        }
        dst->ch_len = power_len;

        power_len = get_count_order(geo->num_lun);
        if (1 << power_len != geo->num_lun) {
                pr_err("pblk: supports only power-of-two LUN config.\n");
                return -EINVAL;
        }
        dst->lun_len = power_len;

        dst->blk_len = src->blk_len;
        dst->pg_len = src->pg_len;
        dst->pln_len = src->pln_len;
        dst->sec_len = src->sec_len;

        dst->sec_offset = 0;
        dst->pln_offset = dst->sec_len;
        dst->ch_offset = dst->pln_offset + dst->pln_len;
        dst->lun_offset = dst->ch_offset + dst->ch_len;
        dst->pg_offset = dst->lun_offset + dst->lun_len;
        dst->blk_offset = dst->pg_offset + dst->pg_len;

        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

        return dst->blk_offset + src->blk_len;
}
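
/*
 * Resulting 1.2 layout, from low to high bits: sec, pln, ch, lun, pg, blk.
 * For an illustrative geometry (4 sectors, 4 planes, 16 channels, 8 LUNs,
 * 512 pages, 1024 blocks), the field widths are 2/2/4/3/9/10 bits with
 * offsets 0/2/4/8/11/20, and the returned addrf_len is 30 bits - small
 * enough to enable the 4-byte L2P entries in pblk_trans_map_size().
 */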

static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
                             struct pblk_addrf *udst)
{
        struct nvm_addrf *src = &geo->addrf;

        adst->ch_len = get_count_order(geo->num_ch);
        adst->lun_len = get_count_order(geo->num_lun);
        adst->chk_len = src->chk_len;
        adst->sec_len = src->sec_len;

        adst->sec_offset = 0;
        adst->ch_offset = adst->sec_len;
        adst->lun_offset = adst->ch_offset + adst->ch_len;
        adst->chk_offset = adst->lun_offset + adst->lun_len;

        adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
        adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
        adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
        adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

        udst->sec_stripe = geo->ws_opt;
        udst->ch_stripe = geo->num_ch;
        udst->lun_stripe = geo->num_lun;

        udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
        udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

        return adst->chk_offset + adst->chk_len;
}
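
/*
 * The uaddrf describes pblk's write striping on 2.0 devices: ws_opt
 * sectors are written per chunk, striping across all channels first and
 * then across LUNs. With, say, ws_opt = 8 on 8 channels and 4 LUNs,
 * sec_lun_stripe = 64 and a full stripe (sec_ws_stripe) spans 256 sectors.
 */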

static int pblk_set_addrf(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int mod;

        switch (geo->version) {
        case NVM_OCSSD_SPEC_12:
                div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
                if (mod) {
                        pr_err("pblk: bad configuration of sectors/pages\n");
                        return -EINVAL;
                }

                pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf);
                break;
        case NVM_OCSSD_SPEC_20:
                pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
                                                                &pblk->uaddrf);
                break;
        default:
                pr_err("pblk: OCSSD revision not supported (%d)\n",
                                                                geo->version);
                return -EINVAL;
        }

        return 0;
}

static int pblk_init_global_caches(struct pblk *pblk)
{
        down_write(&pblk_lock);
        pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
                                sizeof(struct pblk_line_ws), 0, 0, NULL);
        if (!pblk_ws_cache) {
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_rec_cache = kmem_cache_create("pblk_rec",
                                sizeof(struct pblk_rec_ctx), 0, 0, NULL);
        if (!pblk_rec_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
                                0, 0, NULL);
        if (!pblk_g_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
                                0, 0, NULL);
        if (!pblk_w_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                kmem_cache_destroy(pblk_g_rq_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }
        up_write(&pblk_lock);

        return 0;
}
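
/*
 * The caches above back the mempools created in pblk_core_init(). Since
 * they live in file-scope globals, pblk_lock serializes their creation
 * and teardown, and each error path destroys only the caches created so
 * far before dropping the lock.
 */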

static void pblk_free_global_caches(struct pblk *pblk)
{
        kmem_cache_destroy(pblk_ws_cache);
        kmem_cache_destroy(pblk_rec_cache);
        kmem_cache_destroy(pblk_g_rq_cache);
        kmem_cache_destroy(pblk_w_rq_cache);
}

static int pblk_core_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int ret, max_write_ppas;

        atomic64_set(&pblk->user_wa, 0);
        atomic64_set(&pblk->pad_wa, 0);
        atomic64_set(&pblk->gc_wa, 0);
        pblk->user_rst_wa = 0;
        pblk->pad_rst_wa = 0;
        pblk->gc_rst_wa = 0;

        atomic64_set(&pblk->nr_flush, 0);
        pblk->nr_flush_rst = 0;

        pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;

        pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
        max_write_ppas = pblk->min_write_pgs * geo->all_luns;
        pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
        pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

        if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
                pr_err("pblk: vector list too big (%u > %u)\n",
                                pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
                return -EINVAL;
        }

        pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t),
                                                                GFP_KERNEL);
        if (!pblk->pad_dist)
                return -ENOMEM;

        if (pblk_init_global_caches(pblk))
                goto fail_free_pad_dist;

        /* Internal bios can be at most the sectors signaled by the device. */
        ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
        if (ret)
                goto free_global_caches;

        ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
                                     pblk_ws_cache);
        if (ret)
                goto free_page_bio_pool;

        ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
                                     pblk_rec_cache);
        if (ret)
                goto free_gen_ws_pool;

        ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
                                     pblk_g_rq_cache);
        if (ret)
                goto free_rec_pool;

        ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
                                     pblk_g_rq_cache);
        if (ret)
                goto free_r_rq_pool;

        ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
                                     pblk_w_rq_cache);
        if (ret)
                goto free_e_rq_pool;

        pblk->close_wq = alloc_workqueue("pblk-close-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
        if (!pblk->close_wq)
                goto free_w_rq_pool;

        pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->bb_wq)
                goto free_close_wq;

        pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->r_end_wq)
                goto free_bb_wq;

        if (pblk_set_addrf(pblk))
                goto free_r_end_wq;

        INIT_LIST_HEAD(&pblk->compl_list);
        INIT_LIST_HEAD(&pblk->resubmit_list);

        return 0;

free_r_end_wq:
        destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
        destroy_workqueue(pblk->bb_wq);
free_close_wq:
        destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
        mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
        mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
        mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
        mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
        mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
        mempool_exit(&pblk->page_bio_pool);
free_global_caches:
        pblk_free_global_caches(pblk);
fail_free_pad_dist:
        kfree(pblk->pad_dist);
        return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
        if (pblk->close_wq)
                destroy_workqueue(pblk->close_wq);

        if (pblk->r_end_wq)
                destroy_workqueue(pblk->r_end_wq);

        if (pblk->bb_wq)
                destroy_workqueue(pblk->bb_wq);

        mempool_exit(&pblk->page_bio_pool);
        mempool_exit(&pblk->gen_ws_pool);
        mempool_exit(&pblk->rec_pool);
        mempool_exit(&pblk->r_rq_pool);
        mempool_exit(&pblk->e_rq_pool);
        mempool_exit(&pblk->w_rq_pool);

        pblk_free_global_caches(pblk);
        kfree(pblk->pad_dist);
}

static void pblk_line_mg_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        kfree(l_mg->bb_template);
        kfree(l_mg->bb_aux);
        kfree(l_mg->vsc_list);

        for (i = 0; i < PBLK_DATA_LINES; i++) {
                kfree(l_mg->sline_meta[i]);
                pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
                kfree(l_mg->eline_meta[i]);
        }
}

static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
                                struct pblk_line *line)
{
        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

        kfree(line->blk_bitmap);
        kfree(line->erase_bitmap);
        kfree(line->chks);

        pblk_mfree(w_err_gc->lba_list, l_mg->emeta_alloc_type);
        kfree(w_err_gc);
}

static void pblk_lines_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int i;

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                pblk_line_free(line);
                pblk_line_meta_free(l_mg, line);
        }
        spin_unlock(&l_mg->free_lock);

        pblk_line_mg_free(pblk);

        kfree(pblk->luns);
        kfree(pblk->lines);
}

static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
                           u8 *blks, int nr_blks)
{
        struct ppa_addr ppa;
        int ret;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;

        ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
        if (ret)
                return ret;

        nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
        if (nr_blks < 0)
                return -EIO;

        return 0;
}

static void *pblk_bb_get_meta(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        u8 *meta;
        int i, nr_blks, blk_per_lun;
        int ret;

        blk_per_lun = geo->num_chk * geo->pln_mode;
        nr_blks = blk_per_lun * geo->all_luns;

        meta = kmalloc(nr_blks, GFP_KERNEL);
        if (!meta)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < geo->all_luns; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                u8 *meta_pos = meta + i * blk_per_lun;

                ret = pblk_bb_get_tbl(dev, rlun, meta_pos, blk_per_lun);
                if (ret) {
                        kfree(meta);
                        return ERR_PTR(-EIO);
                }
        }

        return meta;
}

static void *pblk_chunk_get_meta(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        if (geo->version == NVM_OCSSD_SPEC_12)
                return pblk_bb_get_meta(pblk);
        else
                return pblk_chunk_get_info(pblk);
}

static int pblk_luns_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int i;

        /* TODO: Implement unbalanced LUN support */
        if (geo->num_lun < 0) {
                pr_err("pblk: unbalanced LUN config.\n");
                return -EINVAL;
        }

        pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
                                                                GFP_KERNEL);
        if (!pblk->luns)
                return -ENOMEM;

        for (i = 0; i < geo->all_luns; i++) {
                /* Stripe across channels */
                int ch = i % geo->num_ch;
                int lun_raw = i / geo->num_ch;
                int lunid = lun_raw + ch * geo->num_lun;

                rlun = &pblk->luns[i];
                rlun->bppa = dev->luns[lunid];

                sema_init(&rlun->wr_sem, 1);
        }

        return 0;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        /* Round to sector size so that lba_list starts on its own sector */
        lm->emeta_sec[1] = DIV_ROUND_UP(
                        sizeof(struct line_emeta) + lm->blk_bitmap_len +
                        sizeof(struct wa_counters), geo->csecs);
        lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

        /* Round to sector size so that vsc_list starts on its own sector */
        lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
        lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
                        geo->csecs);
        lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

        lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
                        geo->csecs);
        lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

        lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

        return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
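
/*
 * emeta is thus laid out in four regions: emeta_sec[0] is the full
 * footprint (computed in pblk_line_meta_init()), [1] holds the line_emeta
 * header, bad-block bitmap and write-amplification counters, [2] the
 * per-data-sector lba_list, and [3] the vsc_list; each region is rounded
 * up so it starts on its own sector.
 */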

static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        sector_t provisioned;
        int sec_meta, blk_meta;

        if (geo->op == NVM_TARGET_DEFAULT_OP)
                pblk->op = PBLK_DEFAULT_OP;
        else
                pblk->op = geo->op;

        provisioned = nr_free_blks;
        provisioned *= (100 - pblk->op);
        sector_div(provisioned, 100);

        pblk->op_blks = nr_free_blks - provisioned;

        /* Internally pblk manages all free blocks, but all calculations based
         * on user capacity consider only provisioned blocks
         */
        pblk->rl.total_blocks = nr_free_blks;
        pblk->rl.nr_secs = nr_free_blks * geo->clba;

        /* Consider sectors used for metadata */
        sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
        blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

        pblk->capacity = (provisioned - blk_meta) * geo->clba;

        atomic_set(&pblk->rl.free_blocks, nr_free_blks);
        atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
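
/*
 * Worked example of the over-provisioning math, assuming the default OP
 * of 11% defined in pblk.h: with nr_free_blks = 1000, provisioned =
 * 1000 * 89 / 100 = 890, so op_blks = 110 are held back and the exposed
 * capacity is (890 - blk_meta) * clba sectors.
 */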

static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
                                   void *chunk_meta)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int i, chk_per_lun, nr_bad_chks = 0;

        chk_per_lun = geo->num_chk * geo->pln_mode;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                struct nvm_chk_meta *chunk;
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                u8 *lun_bb_meta = chunk_meta + pos * chk_per_lun;

                chunk = &line->chks[pos];

                /*
                 * In 1.2 spec. chunk state is not persisted by the device. Thus
                 * some of the values are reset each time pblk is instantiated.
                 */
                if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
                        chunk->state = NVM_CHK_ST_FREE;
                else
                        chunk->state = NVM_CHK_ST_OFFLINE;

                chunk->type = NVM_CHK_TP_W_SEQ;
                chunk->wi = 0;
                chunk->slba = -1;
                chunk->cnlb = geo->clba;
                chunk->wp = 0;

                if (!(chunk->state & NVM_CHK_ST_OFFLINE))
                        continue;

                set_bit(pos, line->blk_bitmap);
                nr_bad_chks++;
        }

        return nr_bad_chks;
}

static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
                                   struct nvm_chk_meta *meta)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int i, nr_bad_chks = 0;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                struct nvm_chk_meta *chunk;
                struct nvm_chk_meta *chunk_meta;
                struct ppa_addr ppa;
                int pos;

                ppa = rlun->bppa;
                pos = pblk_ppa_to_pos(geo, ppa);
                chunk = &line->chks[pos];

                ppa.m.chk = line->id;
                chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);

                chunk->state = chunk_meta->state;
                chunk->type = chunk_meta->type;
                chunk->wi = chunk_meta->wi;
                chunk->slba = chunk_meta->slba;
                chunk->cnlb = chunk_meta->cnlb;
                chunk->wp = chunk_meta->wp;

                if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
                        WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
                        continue;
                }

                if (!(chunk->state & NVM_CHK_ST_OFFLINE))
                        continue;

                set_bit(pos, line->blk_bitmap);
                nr_bad_chks++;
        }

        return nr_bad_chks;
}

static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
                                 void *chunk_meta, int line_id)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        long nr_bad_chks, chk_in_line;

        line->pblk = pblk;
        line->id = line_id;
        line->type = PBLK_LINETYPE_FREE;
        line->state = PBLK_LINESTATE_NEW;
        line->gc_group = PBLK_LINEGC_NONE;
        line->vsc = &l_mg->vsc_list[line_id];
        spin_lock_init(&line->lock);

        if (geo->version == NVM_OCSSD_SPEC_12)
                nr_bad_chks = pblk_setup_line_meta_12(pblk, line, chunk_meta);
        else
                nr_bad_chks = pblk_setup_line_meta_20(pblk, line, chunk_meta);

        chk_in_line = lm->blk_per_line - nr_bad_chks;
        if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
                                        chk_in_line < lm->min_blk_line) {
                line->state = PBLK_LINESTATE_BAD;
                list_add_tail(&line->list, &l_mg->bad_list);
                return 0;
        }

        atomic_set(&line->blk_in_line, chk_in_line);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;

        return chk_in_line;
}
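
/*
 * A line is only placed on the free list if its chunk accounting is sane:
 * nr_bad_chks must fall within [0, blk_per_line] and enough good chunks
 * must remain to hold the line metadata (min_blk_line). Anything else is
 * parked on bad_list and contributes zero chunks to provisioning.
 */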

static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->blk_bitmap)
                return -ENOMEM;

        line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->erase_bitmap)
                goto free_blk_bitmap;

        line->chks = kmalloc(lm->blk_per_line * sizeof(struct nvm_chk_meta),
                                                                GFP_KERNEL);
        if (!line->chks)
                goto free_erase_bitmap;

        line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
        if (!line->w_err_gc)
                goto free_chks;

        return 0;

free_chks:
        kfree(line->chks);
free_erase_bitmap:
        kfree(line->erase_bitmap);
free_blk_bitmap:
        kfree(line->blk_bitmap);
        return -ENOMEM;
}

static int pblk_line_mg_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int i, bb_distance;

        l_mg->nr_lines = geo->num_chk;
        l_mg->log_line = l_mg->data_line = NULL;
        l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
        l_mg->nr_free_lines = 0;
        bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

        INIT_LIST_HEAD(&l_mg->free_list);
        INIT_LIST_HEAD(&l_mg->corrupt_list);
        INIT_LIST_HEAD(&l_mg->bad_list);
        INIT_LIST_HEAD(&l_mg->gc_full_list);
        INIT_LIST_HEAD(&l_mg->gc_high_list);
        INIT_LIST_HEAD(&l_mg->gc_mid_list);
        INIT_LIST_HEAD(&l_mg->gc_low_list);
        INIT_LIST_HEAD(&l_mg->gc_empty_list);
        INIT_LIST_HEAD(&l_mg->gc_werr_list);

        INIT_LIST_HEAD(&l_mg->emeta_list);

        l_mg->gc_lists[0] = &l_mg->gc_werr_list;
        l_mg->gc_lists[1] = &l_mg->gc_high_list;
        l_mg->gc_lists[2] = &l_mg->gc_mid_list;
        l_mg->gc_lists[3] = &l_mg->gc_low_list;

        spin_lock_init(&l_mg->free_lock);
        spin_lock_init(&l_mg->close_lock);
        spin_lock_init(&l_mg->gc_lock);

        l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
        if (!l_mg->vsc_list)
                goto fail;

        l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_template)
                goto fail_free_vsc_list;

        l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_aux)
                goto fail_free_bb_template;

        /* smeta is always small enough to fit on a kmalloc memory allocation,
         * emeta depends on the number of LUNs allocated to the pblk instance
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
                if (!l_mg->sline_meta[i])
                        goto fail_free_smeta;
        }

        /* emeta allocates three different buffers for managing metadata with
         * in-memory and in-media layouts
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                struct pblk_emeta *emeta;

                emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
                if (!emeta)
                        goto fail_free_emeta;

                if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
                        l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

                        emeta->buf = vmalloc(lm->emeta_len[0]);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                } else {
                        l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

                        emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                }
        }

        for (i = 0; i < l_mg->nr_lines; i++)
                l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

        bb_distance = (geo->all_luns) * geo->ws_opt;
        for (i = 0; i < lm->sec_per_line; i += bb_distance)
                bitmap_set(l_mg->bb_template, i, geo->ws_opt);

        return 0;

fail_free_emeta:
        while (--i >= 0) {
                if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
                        vfree(l_mg->eline_meta[i]->buf);
                else
                        kfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }
fail_free_smeta:
        for (i = 0; i < PBLK_DATA_LINES; i++)
                kfree(l_mg->sline_meta[i]);
        kfree(l_mg->bb_aux);
fail_free_bb_template:
        kfree(l_mg->bb_template);
fail_free_vsc_list:
        kfree(l_mg->vsc_list);
fail:
        return -ENOMEM;
}

static int pblk_line_meta_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int smeta_len, emeta_len;
        int i;

        lm->sec_per_line = geo->clba * geo->all_luns;
        lm->blk_per_line = geo->all_luns;
        lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
        lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->mid_thrs = lm->sec_per_line / 2;
        lm->high_thrs = lm->sec_per_line / 4;
        lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

        /* Calculate necessary pages for smeta. See comment over struct
         * line_smeta definition
         */
        i = 1;
add_smeta_page:
        lm->smeta_sec = i * geo->ws_opt;
        lm->smeta_len = lm->smeta_sec * geo->csecs;

        smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
        if (smeta_len > lm->smeta_len) {
                i++;
                goto add_smeta_page;
        }

        /* Calculate necessary pages for emeta. See comment over struct
         * line_emeta definition
         */
        i = 1;
add_emeta_page:
        lm->emeta_sec[0] = i * geo->ws_opt;
        lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

        emeta_len = calc_emeta_len(pblk);
        if (emeta_len > lm->emeta_len[0]) {
                i++;
                goto add_emeta_page;
        }

        lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

        lm->min_blk_line = 1;
        if (geo->all_luns > 1)
                lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
                                        lm->emeta_sec[0], geo->clba);

        if (lm->min_blk_line > lm->blk_per_line) {
                pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
                                                        lm->blk_per_line);
                return -EINVAL;
        }

        return 0;
}
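
/*
 * Illustrative run of the smeta sizing loop, assuming ws_opt = 8 and 4KB
 * sectors: each iteration grows smeta by one 32KB write unit, and since
 * struct line_smeta plus the lun_bitmap is typically far below 32KB, the
 * loop settles on smeta_sec = 8 after the first pass. emeta converges the
 * same way against calc_emeta_len().
 */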

static int pblk_lines_init(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        void *chunk_meta;
        long nr_free_chks = 0;
        int i, ret;

        ret = pblk_line_meta_init(pblk);
        if (ret)
                return ret;

        ret = pblk_line_mg_init(pblk);
        if (ret)
                return ret;

        ret = pblk_luns_init(pblk);
        if (ret)
                goto fail_free_meta;

        chunk_meta = pblk_chunk_get_meta(pblk);
        if (IS_ERR(chunk_meta)) {
                ret = PTR_ERR(chunk_meta);
                goto fail_free_luns;
        }

        pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
                                                                GFP_KERNEL);
        if (!pblk->lines) {
                ret = -ENOMEM;
                goto fail_free_chunk_meta;
        }

        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                ret = pblk_alloc_line_meta(pblk, line);
                if (ret)
                        goto fail_free_lines;

                nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
        }

1081                 pr_err("pblk: too many bad blocks prevent for sane instance\n");
1082                 return -EINTR;
1083         }
1084
1085         pblk_set_provision(pblk, nr_free_chks);
1086
1087         kfree(chunk_meta);
1088         return 0;
1089
1090 fail_free_lines:
1091         while (--i >= 0)
1092                 pblk_line_meta_free(l_mg, &pblk->lines[i]);
1093         kfree(pblk->lines);
1094 fail_free_chunk_meta:
1095         kfree(chunk_meta);
1096 fail_free_luns:
1097         kfree(pblk->luns);
1098 fail_free_meta:
1099         pblk_line_mg_free(pblk);
1100
1101         return ret;
1102 }
1103
1104 static int pblk_writer_init(struct pblk *pblk)
1105 {
1106         pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
1107         if (IS_ERR(pblk->writer_ts)) {
1108                 int err = PTR_ERR(pblk->writer_ts);
1109
1110                 if (err != -EINTR)
1111                         pr_err("pblk: could not allocate writer kthread (%d)\n",
1112                                         err);
1113                 return err;
1114         }
1115
1116         timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
1117         mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
1118
1119         return 0;
1120 }
1121
1122 static void pblk_writer_stop(struct pblk *pblk)
1123 {
1124         /* The pipeline must be stopped and the write buffer emptied before the
1125          * write thread is stopped
1126          */
1127         WARN(pblk_rb_read_count(&pblk->rwb),
1128                         "Stopping not fully persisted write buffer\n");
1129
1130         WARN(pblk_rb_sync_count(&pblk->rwb),
1131                         "Stopping not fully synced write buffer\n");
1132
1133         del_timer_sync(&pblk->wtimer);
1134         if (pblk->writer_ts)
1135                 kthread_stop(pblk->writer_ts);
1136 }
1137
1138 static void pblk_free(struct pblk *pblk)
1139 {
1140         pblk_lines_free(pblk);
1141         pblk_l2p_free(pblk);
1142         pblk_rwb_free(pblk);
1143         pblk_core_free(pblk);
1144
1145         kfree(pblk);
1146 }
1147
1148 static void pblk_tear_down(struct pblk *pblk, bool graceful)
1149 {
1150         if (graceful)
1151                 __pblk_pipeline_flush(pblk);
1152         __pblk_pipeline_stop(pblk);
1153         pblk_writer_stop(pblk);
1154         pblk_rb_sync_l2p(&pblk->rwb);
1155         pblk_rl_free(&pblk->rl);
1156
1157         pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful);
1158 }
1159
1160 static void pblk_exit(void *private, bool graceful)
1161 {
1162         struct pblk *pblk = private;
1163
1164         down_write(&pblk_lock);
1165         pblk_gc_exit(pblk, graceful);
1166         pblk_tear_down(pblk, graceful);
1167
1168 #ifdef CONFIG_NVM_DEBUG
1169         pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
1170 #endif
1171
1172         pblk_free(pblk);
1173         up_write(&pblk_lock);
1174 }
1175
1176 static sector_t pblk_capacity(void *private)
1177 {
1178         struct pblk *pblk = private;
1179
1180         return pblk->capacity * NR_PHY_IN_LOG;
1181 }
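
/*
 * pblk->capacity is kept in device sectors of csecs bytes; NR_PHY_IN_LOG
 * (assumed 4096 / 512 = 8, per pblk.h) converts it to the 512-byte units
 * the block layer reports.
 */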

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
                       int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct pblk *pblk;
        int ret;

        /* pblk supports 1.2 and 2.0 versions */
        if (!(geo->version == NVM_OCSSD_SPEC_12 ||
                                        geo->version == NVM_OCSSD_SPEC_20)) {
                pr_err("pblk: OCSSD version not supported (%u)\n",
                                                        geo->version);
                return ERR_PTR(-EINVAL);
        }

        if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
                pr_err("pblk: host-side L2P table not supported. (%x)\n",
                                                        geo->dom);
                return ERR_PTR(-EINVAL);
        }

        pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
        if (!pblk)
                return ERR_PTR(-ENOMEM);

        pblk->dev = dev;
        pblk->disk = tdisk;
        pblk->state = PBLK_STATE_RUNNING;
        pblk->gc.gc_enabled = 0;

        spin_lock_init(&pblk->resubmit_lock);
        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
        atomic_long_set(&pblk->req_writes, 0);
        atomic_long_set(&pblk->sub_writes, 0);
        atomic_long_set(&pblk->sync_writes, 0);
        atomic_long_set(&pblk->inflight_reads, 0);
        atomic_long_set(&pblk->cache_reads, 0);
        atomic_long_set(&pblk->sync_reads, 0);
        atomic_long_set(&pblk->recov_writes, 0);
        atomic_long_set(&pblk->recov_gc_writes, 0);
        atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

        atomic_long_set(&pblk->read_failed, 0);
        atomic_long_set(&pblk->read_empty, 0);
        atomic_long_set(&pblk->read_high_ecc, 0);
        atomic_long_set(&pblk->read_failed_gc, 0);
        atomic_long_set(&pblk->write_failed, 0);
        atomic_long_set(&pblk->erase_failed, 0);

        ret = pblk_core_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize core\n");
                goto fail;
        }

        ret = pblk_lines_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize lines\n");
                goto fail_free_core;
        }

        ret = pblk_rwb_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize write buffer\n");
                goto fail_free_lines;
        }

        ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
        if (ret) {
                pr_err("pblk: could not initialize maps\n");
                goto fail_free_rwb;
        }

        ret = pblk_writer_init(pblk);
        if (ret) {
                if (ret != -EINTR)
                        pr_err("pblk: could not initialize write thread\n");
                goto fail_free_l2p;
        }

        ret = pblk_gc_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize gc\n");
                goto fail_stop_writer;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        blk_queue_write_cache(tqueue, true, false);

        tqueue->limits.discard_granularity = geo->clba * geo->csecs;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

        pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        tdisk->disk_name,
                        geo->all_luns, pblk->l_mg.nr_lines,
                        (unsigned long long)pblk->rl.nr_secs,
                        pblk->rwb.nr_entries);

        wake_up_process(pblk->writer_ts);

        /* Check if we need to start GC */
        pblk_gc_should_kick(pblk);

        return pblk;

fail_stop_writer:
        pblk_writer_stop(pblk);
fail_free_l2p:
        pblk_l2p_free(pblk);
fail_free_rwb:
        pblk_rwb_free(pblk);
fail_free_lines:
        pblk_lines_free(pblk);
fail_free_core:
        pblk_core_free(pblk);
fail:
        kfree(pblk);
        return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
        .name           = "pblk",
        .version        = {1, 0, 0},

        .make_rq        = pblk_make_rq,
        .capacity       = pblk_capacity,

        .init           = pblk_init,
        .exit           = pblk_exit,

        .sysfs_init     = pblk_sysfs_init,
        .sysfs_exit     = pblk_sysfs_exit,
        .owner          = THIS_MODULE,
};

static int __init pblk_module_init(void)
{
        int ret;

        ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
        if (ret)
                return ret;
        ret = nvm_register_tgt_type(&tt_pblk);
        if (ret)
                bioset_exit(&pblk_bio_set);
        return ret;
}

static void pblk_module_exit(void)
{
        bioset_exit(&pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");