/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
                                *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

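/*
 * Entry point for user I/O. Reads are bounded and submitted directly;
 * writes are only staged in the write buffer here and are persisted
 * asynchronously by the write thread.
 */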
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
                          struct bio *bio)
{
        int ret;

        /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
         * constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
                blk_queue_split(q, &bio);
                ret = pblk_submit_read(pblk, bio);
                if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
                        bio_put(bio);

                return ret;
        }

        /* Prevent deadlock in the case of a modest LUN configuration and large
         * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
         * available for user I/O.
         */
        if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                blk_queue_split(q, &bio);

        return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                pblk_discard(pblk, bio);
                /* A discard carrying PREFLUSH falls through so the flush is
                 * honored by the write path.
                 */
                if (!(bio->bi_opf & REQ_PREFLUSH)) {
                        bio_endio(bio);
                        return BLK_QC_T_NONE;
                }
        }

        switch (pblk_rw_io(q, pblk, bio)) {
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        }

        return BLK_QC_T_NONE;
}

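/*
 * Each L2P entry holds one physical address: 4 bytes when the packed
 * address format fits in 32 bits, 8 bytes otherwise. As a rough,
 * illustrative sizing (not derived from this code): 2^28 sectors of 4KB
 * (1TB of user data) need 1GB of host memory at 4 bytes per entry.
 */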
static size_t pblk_trans_map_size(struct pblk *pblk)
{
        int entry_size = 8;

        if (pblk->ppaf_bitsize < 32)
                entry_size = 4;

        return entry_size * pblk->rl.nr_secs;
}

#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
        size_t map_size;
        u32 crc = ~(u32)0;

        map_size = pblk_trans_map_size(pblk);
        crc = crc32_le(crc, pblk->trans_map, map_size);
        return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
        vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
        sector_t i;
        struct ppa_addr ppa;
        size_t map_size;

        map_size = pblk_trans_map_size(pblk);
        pblk->trans_map = vmalloc(map_size);
        if (!pblk->trans_map)
                return -ENOMEM;

        pblk_ppa_set_empty(&ppa);

        /* Start with every logical sector unmapped */
        for (i = 0; i < pblk->rl.nr_secs; i++)
                pblk_trans_map_set(pblk, i, ppa);

        return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
        if (pblk_rb_tear_down_check(&pblk->rwb))
                pr_err("pblk: write buffer error on tear down\n");

        pblk_rb_data_free(&pblk->rwb);
        vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_rb_entry *entries;
        unsigned long nr_entries;
        unsigned int power_size, power_seg_sz;

        nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

        entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
        if (!entries)
                return -ENOMEM;

        power_size = get_count_order(nr_entries);
        power_seg_sz = get_count_order(geo->sec_size);

        return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

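/*
 * pblk packs physical addresses as sec|pln|ch|lun|pg|blk, from the least
 * significant bits upwards: each offset below is the previous offset plus
 * the previous field's width, and each mask selects exactly that field.
 * Illustrative example (not from a real device): with sect_len = 2 and
 * pln_len = 1, the channel field starts at bit 3.
 */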
static int pblk_set_ppaf(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_addr_format ppaf = geo->ppaf;
        int power_len;

        /* Re-calculate channel and lun format to adapt to configuration */
        power_len = get_count_order(geo->nr_chnls);
        if (1 << power_len != geo->nr_chnls) {
                pr_err("pblk: supports only power-of-two channel config.\n");
                return -EINVAL;
        }
        ppaf.ch_len = power_len;

        power_len = get_count_order(geo->nr_luns);
        if (1 << power_len != geo->nr_luns) {
                pr_err("pblk: supports only power-of-two LUN config.\n");
                return -EINVAL;
        }
        ppaf.lun_len = power_len;

        pblk->ppaf.sec_offset = 0;
        pblk->ppaf.pln_offset = ppaf.sect_len;
        pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
        pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
        pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
        pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
        pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
        pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
                                                        pblk->ppaf.pln_offset;
        pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
                                                        pblk->ppaf.ch_offset;
        pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
                                                        pblk->ppaf.lun_offset;
        pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
                                                        pblk->ppaf.pg_offset;
        pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
                                                        pblk->ppaf.blk_offset;

        pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

        return 0;
}

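/*
 * The slab caches below are global (shared by all pblk instances);
 * pblk_lock serializes their creation and destruction.
 */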
static int pblk_init_global_caches(struct pblk *pblk)
{
        down_write(&pblk_lock);
        pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
                                sizeof(struct pblk_line_ws), 0, 0, NULL);
        if (!pblk_ws_cache) {
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_rec_cache = kmem_cache_create("pblk_rec",
                                sizeof(struct pblk_rec_ctx), 0, 0, NULL);
        if (!pblk_rec_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
                                0, 0, NULL);
        if (!pblk_g_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
                                0, 0, NULL);
        if (!pblk_w_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                kmem_cache_destroy(pblk_g_rq_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }
        up_write(&pblk_lock);

        return 0;
}

static void pblk_free_global_caches(struct pblk *pblk)
{
        kmem_cache_destroy(pblk_ws_cache);
        kmem_cache_destroy(pblk_rec_cache);
        kmem_cache_destroy(pblk_g_rq_cache);
        kmem_cache_destroy(pblk_w_rq_cache);
}

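/*
 * Core bring-up: size the write buffer, create the global caches plus the
 * mempools and workqueues used in the I/O path, and derive the in-core
 * address format. On failure, resources are unwound in reverse order.
 */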
static int pblk_core_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
                                                geo->nr_planes * geo->all_luns;

        if (pblk_init_global_caches(pblk))
                return -ENOMEM;

        /* Internal bios can be at most the sectors signaled by the device. */
        pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
                                                                        0);
        if (!pblk->page_bio_pool)
                goto free_global_caches;

        pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
                                                        pblk_ws_cache);
        if (!pblk->gen_ws_pool)
                goto free_page_bio_pool;

        pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_rec_cache);
        if (!pblk->rec_pool)
                goto free_gen_ws_pool;

        pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->r_rq_pool)
                goto free_rec_pool;

        pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->e_rq_pool)
                goto free_r_rq_pool;

        pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_w_rq_cache);
        if (!pblk->w_rq_pool)
                goto free_e_rq_pool;

        pblk->close_wq = alloc_workqueue("pblk-close-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
        if (!pblk->close_wq)
                goto free_w_rq_pool;

        pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->bb_wq)
                goto free_close_wq;

        pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->r_end_wq)
                goto free_bb_wq;

        if (pblk_set_ppaf(pblk))
                goto free_r_end_wq;

        if (pblk_rwb_init(pblk))
                goto free_r_end_wq;

        INIT_LIST_HEAD(&pblk->compl_list);

        return 0;

free_r_end_wq:
        destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
        destroy_workqueue(pblk->bb_wq);
free_close_wq:
        destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
        mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
        mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
        mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
        mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
        mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
        mempool_destroy(pblk->page_bio_pool);
free_global_caches:
        pblk_free_global_caches(pblk);

        return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
        if (pblk->close_wq)
                destroy_workqueue(pblk->close_wq);

        if (pblk->r_end_wq)
                destroy_workqueue(pblk->r_end_wq);

        if (pblk->bb_wq)
                destroy_workqueue(pblk->bb_wq);

        mempool_destroy(pblk->page_bio_pool);
        mempool_destroy(pblk->gen_ws_pool);
        mempool_destroy(pblk->rec_pool);
        mempool_destroy(pblk->r_rq_pool);
        mempool_destroy(pblk->e_rq_pool);
        mempool_destroy(pblk->w_rq_pool);

        pblk_rwb_free(pblk);

        pblk_free_global_caches(pblk);
}

static void pblk_luns_free(struct pblk *pblk)
{
        kfree(pblk->luns);
}

static void pblk_line_mg_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        kfree(l_mg->bb_template);
        kfree(l_mg->bb_aux);
        kfree(l_mg->vsc_list);

        for (i = 0; i < PBLK_DATA_LINES; i++) {
                kfree(l_mg->sline_meta[i]);
                pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
                kfree(l_mg->eline_meta[i]);
        }
}

static void pblk_line_meta_free(struct pblk_line *line)
{
        kfree(line->blk_bitmap);
        kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int i;

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                pblk_line_free(pblk, line);
                pblk_line_meta_free(line);
        }
        spin_unlock(&l_mg->free_lock);
}

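/*
 * Query the device bad-block table for one LUN and fold the per-plane
 * entries so the result is indexed by block/chunk.
 */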
static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
                        u8 *blks, int nr_blks)
{
        struct ppa_addr ppa;
        int ret;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;

        ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
        if (ret)
                return ret;

        nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
        if (nr_blks < 0)
                return -EIO;

        return 0;
}

static void *pblk_bb_get_log(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        u8 *log;
        int i, nr_blks, blk_per_lun;
        int ret;

        blk_per_lun = geo->nr_chks * geo->plane_mode;
        nr_blks = blk_per_lun * geo->all_luns;

        log = kmalloc(nr_blks, GFP_KERNEL);
        if (!log)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < geo->all_luns; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                u8 *log_pos = log + i * blk_per_lun;

                ret = pblk_bb_get_tbl(dev, rlun, log_pos, blk_per_lun);
                if (ret) {
                        kfree(log);
                        return ERR_PTR(-EIO);
                }
        }

        return log;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
                        u8 *bb_log, int blk_per_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int i, bb_cnt = 0;

        for (i = 0; i < blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                u8 *lun_bb_log = bb_log + i * blk_per_line;

                if (lun_bb_log[line->id] == NVM_BLK_T_FREE)
                        continue;

                set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
                bb_cnt++;
        }

        return bb_cnt;
}

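/*
 * LUNs are ordered channel-first so consecutive mappings stripe across
 * channels. Illustrative example: with 2 channels and 2 LUNs per channel,
 * pblk->luns holds ch0/lun0, ch1/lun0, ch0/lun1, ch1/lun1.
 */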
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int i;

        /* TODO: Implement unbalanced LUN support */
        if (geo->nr_luns < 0) {
                pr_err("pblk: unbalanced LUN config.\n");
                return -EINVAL;
        }

        pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
                                                                GFP_KERNEL);
        if (!pblk->luns)
                return -ENOMEM;

        for (i = 0; i < geo->all_luns; i++) {
                /* Stripe across channels */
                int ch = i % geo->nr_chnls;
                int lun_raw = i / geo->nr_chnls;
                int lunid = lun_raw + ch * geo->nr_luns;

                rlun = &pblk->luns[i];
                rlun->bppa = luns[lunid];

                sema_init(&rlun->wr_sem, 1);
        }

        return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
        struct pblk_line *line = NULL;
        int ret = 0;

        if (!(flags & NVM_TARGET_FACTORY)) {
                line = pblk_recov_l2p(pblk);
                if (IS_ERR(line)) {
                        pr_err("pblk: could not recover l2p table\n");
                        ret = -EFAULT;
                }

#ifdef CONFIG_NVM_DEBUG
                pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

                /* Free full lines directly as GC has not been started yet */
                pblk_gc_free_full_lines(pblk);
        }

        if (!line) {
                /* Configure next line for user data */
                line = pblk_line_get_first_data(pblk);
                if (!line) {
                        pr_err("pblk: line list corrupted\n");
                        ret = -EFAULT;
                }
        }

        return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        /* Round to sector size so that lba_list starts on its own sector */
        lm->emeta_sec[1] = DIV_ROUND_UP(
                        sizeof(struct line_emeta) + lm->blk_bitmap_len +
                        sizeof(struct wa_counters), geo->sec_size);
        lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

        /* Round to sector size so that vsc_list starts on its own sector */
        lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
        lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
                        geo->sec_size);
        lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

        lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
                        geo->sec_size);
        lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

        lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

        return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

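/*
 * Over-provisioning (OP) reserves a percentage of the free blocks for GC
 * and metadata. Illustrative arithmetic, assuming the default 11% OP:
 * with 1000 free blocks, provisioned = 1000 * (100 - 11) / 100 = 890, so
 * op_blks = 110 and only the 890 blocks count towards user capacity.
 */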
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        sector_t provisioned;
        int sec_meta, blk_meta;

        if (geo->op == NVM_TARGET_DEFAULT_OP)
                pblk->op = PBLK_DEFAULT_OP;
        else
                pblk->op = geo->op;

        provisioned = nr_free_blks;
        provisioned *= (100 - pblk->op);
        sector_div(provisioned, 100);

        pblk->op_blks = nr_free_blks - provisioned;

        /* Internally pblk manages all free blocks, but all calculations based
         * on user capacity consider only provisioned blocks
         */
        pblk->rl.total_blocks = nr_free_blks;
        pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;

        /* Consider sectors used for metadata */
        sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
        blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);

        pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;

        atomic_set(&pblk->rl.free_blocks, nr_free_blks);
        atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}

static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        /* smeta is always small enough to fit on a kmalloc memory allocation,
         * emeta depends on the number of LUNs allocated to the pblk instance
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
                if (!l_mg->sline_meta[i])
                        goto fail_free_smeta;
        }

        /* emeta allocates three different buffers for managing metadata with
         * in-memory and in-media layouts
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                struct pblk_emeta *emeta;

                emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
                if (!emeta)
                        goto fail_free_emeta;

                if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
                        l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

                        emeta->buf = vmalloc(lm->emeta_len[0]);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                } else {
                        l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

                        emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                }
        }

        l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
        if (!l_mg->vsc_list)
                goto fail_free_emeta;

        for (i = 0; i < l_mg->nr_lines; i++)
                l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

        return 0;

fail_free_emeta:
        while (--i >= 0) {
                if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
                        vfree(l_mg->eline_meta[i]->buf);
                else
                        kfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }

fail_free_smeta:
        for (i = 0; i < PBLK_DATA_LINES; i++)
                kfree(l_mg->sline_meta[i]);

        return -ENOMEM;
}

static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
                                void *chunk_log, long *nr_bad_blks)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->blk_bitmap)
                return -ENOMEM;

        line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->erase_bitmap) {
                kfree(line->blk_bitmap);
                return -ENOMEM;
        }

        *nr_bad_blks = pblk_bb_line(pblk, line, chunk_log, lm->blk_per_line);

        return 0;
}

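/*
 * A line stripes one chunk from every LUN in the instance, so
 * sec_per_line = sec_per_chk * all_luns; a line remains usable as long as
 * at least min_blk_line of its chunks are good.
 */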
static int pblk_lines_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        void *chunk_log;
        unsigned int smeta_len, emeta_len;
        long nr_bad_blks = 0, nr_free_blks = 0;
        int bb_distance, max_write_ppas, mod;
        int i, ret;

        pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
        max_write_ppas = pblk->min_write_pgs * geo->all_luns;
        pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
                                max_write_ppas : nvm_max_phys_sects(dev);
        pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

        if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
                pr_err("pblk: cannot support device max_phys_sect\n");
                return -EINVAL;
        }

        div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
        if (mod) {
                pr_err("pblk: bad configuration of sectors/pages\n");
                return -EINVAL;
        }

        l_mg->nr_lines = geo->nr_chks;
        l_mg->log_line = l_mg->data_line = NULL;
        l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
        l_mg->nr_free_lines = 0;
        bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

        lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
        lm->blk_per_line = geo->all_luns;
        lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
        lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->mid_thrs = lm->sec_per_line / 2;
        lm->high_thrs = lm->sec_per_line / 4;
        lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

        /* Calculate necessary pages for smeta. See comment over struct
         * line_smeta definition
         */
        i = 1;
add_smeta_page:
        lm->smeta_sec = i * geo->sec_per_pl;
        lm->smeta_len = lm->smeta_sec * geo->sec_size;

        smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
        if (smeta_len > lm->smeta_len) {
                i++;
                goto add_smeta_page;
        }

        /* Calculate necessary pages for emeta. See comment over struct
         * line_emeta definition
         */
        i = 1;
add_emeta_page:
        lm->emeta_sec[0] = i * geo->sec_per_pl;
        lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

        emeta_len = calc_emeta_len(pblk);
        if (emeta_len > lm->emeta_len[0]) {
                i++;
                goto add_emeta_page;
        }

        lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

        lm->min_blk_line = 1;
        if (geo->all_luns > 1)
                lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
                                        lm->emeta_sec[0], geo->sec_per_chk);

        if (lm->min_blk_line > lm->blk_per_line) {
                pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
                                                        lm->blk_per_line);
                return -EINVAL;
        }

        ret = pblk_lines_alloc_metadata(pblk);
        if (ret)
                return ret;

        l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_template) {
                ret = -ENOMEM;
                goto fail_free_meta;
        }

        l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_aux) {
                ret = -ENOMEM;
                goto fail_free_bb_template;
        }

        bb_distance = (geo->all_luns) * geo->sec_per_pl;
        for (i = 0; i < lm->sec_per_line; i += bb_distance)
                bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

        INIT_LIST_HEAD(&l_mg->free_list);
        INIT_LIST_HEAD(&l_mg->corrupt_list);
        INIT_LIST_HEAD(&l_mg->bad_list);
        INIT_LIST_HEAD(&l_mg->gc_full_list);
        INIT_LIST_HEAD(&l_mg->gc_high_list);
        INIT_LIST_HEAD(&l_mg->gc_mid_list);
        INIT_LIST_HEAD(&l_mg->gc_low_list);
        INIT_LIST_HEAD(&l_mg->gc_empty_list);

        INIT_LIST_HEAD(&l_mg->emeta_list);

        l_mg->gc_lists[0] = &l_mg->gc_high_list;
        l_mg->gc_lists[1] = &l_mg->gc_mid_list;
        l_mg->gc_lists[2] = &l_mg->gc_low_list;

        spin_lock_init(&l_mg->free_lock);
        spin_lock_init(&l_mg->close_lock);
        spin_lock_init(&l_mg->gc_lock);

        pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
                                                                GFP_KERNEL);
        if (!pblk->lines) {
                ret = -ENOMEM;
                goto fail_free_bb_aux;
        }

        chunk_log = pblk_bb_get_log(pblk);
        if (IS_ERR(chunk_log)) {
                pr_err("pblk: could not get bad block log (%lu)\n",
                                                        PTR_ERR(chunk_log));
                ret = PTR_ERR(chunk_log);
                goto fail_free_bb_aux;
        }

        for (i = 0; i < l_mg->nr_lines; i++) {
                int chk_in_line;

                line = &pblk->lines[i];

                line->pblk = pblk;
                line->id = i;
                line->type = PBLK_LINETYPE_FREE;
                line->state = PBLK_LINESTATE_FREE;
                line->gc_group = PBLK_LINEGC_NONE;
                line->vsc = &l_mg->vsc_list[i];
                spin_lock_init(&line->lock);

                ret = pblk_setup_line_meta(pblk, line, chunk_log, &nr_bad_blks);
                if (ret)
                        goto fail_free_chunk_log;

                chk_in_line = lm->blk_per_line - nr_bad_blks;
                if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line ||
                                        chk_in_line < lm->min_blk_line) {
                        line->state = PBLK_LINESTATE_BAD;
                        list_add_tail(&line->list, &l_mg->bad_list);
                        continue;
                }

                nr_free_blks += chk_in_line;
                atomic_set(&line->blk_in_line, chk_in_line);

                l_mg->nr_free_lines++;
                list_add_tail(&line->list, &l_mg->free_list);
        }

        pblk_set_provision(pblk, nr_free_blks);

        kfree(chunk_log);
        return 0;

fail_free_chunk_log:
        kfree(chunk_log);
        while (--i >= 0)
                pblk_line_meta_free(&pblk->lines[i]);
fail_free_bb_aux:
        kfree(l_mg->bb_aux);
fail_free_bb_template:
        kfree(l_mg->bb_template);
fail_free_meta:
        pblk_line_mg_free(pblk);

        return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
        if (IS_ERR(pblk->writer_ts)) {
                int err = PTR_ERR(pblk->writer_ts);

                if (err != -EINTR)
                        pr_err("pblk: could not allocate writer kthread (%d)\n",
                                        err);
                return err;
        }

        timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

        return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
        /* The pipeline must be stopped and the write buffer emptied before the
         * write thread is stopped
         */
        WARN(pblk_rb_read_count(&pblk->rwb),
                        "Stopping not fully persisted write buffer\n");

        WARN(pblk_rb_sync_count(&pblk->rwb),
                        "Stopping not fully synced write buffer\n");

        del_timer_sync(&pblk->wtimer);
        if (pblk->writer_ts)
                kthread_stop(pblk->writer_ts);
}

static void pblk_free(struct pblk *pblk)
{
        pblk_luns_free(pblk);
        pblk_lines_free(pblk);
        kfree(pblk->pad_dist);
        pblk_line_mg_free(pblk);
        pblk_core_free(pblk);
        pblk_l2p_free(pblk);

        kfree(pblk);
}

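/*
 * Tear-down order matters: the pipeline is stopped and the write buffer
 * fully persisted before the writer thread and rate limiter go away.
 */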
static void pblk_tear_down(struct pblk *pblk)
{
        pblk_pipeline_stop(pblk);
        pblk_writer_stop(pblk);
        pblk_rb_sync_l2p(&pblk->rwb);
        pblk_rl_free(&pblk->rl);

        pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
        struct pblk *pblk = private;

        down_write(&pblk_lock);
        pblk_gc_exit(pblk);
        pblk_tear_down(pblk);

#ifdef CONFIG_NVM_DEBUG
        pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

        pblk_free(pblk);
        up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
        struct pblk *pblk = private;

        return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
                       int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct pblk *pblk;
        int ret;

        if (dev->identity.dom & NVM_RSP_L2P) {
                pr_err("pblk: host-side L2P table not supported. (%x)\n",
                                                        dev->identity.dom);
                return ERR_PTR(-EINVAL);
        }

        pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
        if (!pblk)
                return ERR_PTR(-ENOMEM);

        pblk->dev = dev;
        pblk->disk = tdisk;
        pblk->state = PBLK_STATE_RUNNING;
        pblk->gc.gc_enabled = 0;

        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);

        if (flags & NVM_TARGET_FACTORY)
                pblk_setup_uuid(pblk);

        atomic64_set(&pblk->user_wa, 0);
        atomic64_set(&pblk->pad_wa, 0);
        atomic64_set(&pblk->gc_wa, 0);
        pblk->user_rst_wa = 0;
        pblk->pad_rst_wa = 0;
        pblk->gc_rst_wa = 0;

        atomic64_set(&pblk->nr_flush, 0);
        pblk->nr_flush_rst = 0;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
        atomic_long_set(&pblk->req_writes, 0);
        atomic_long_set(&pblk->sub_writes, 0);
        atomic_long_set(&pblk->sync_writes, 0);
        atomic_long_set(&pblk->inflight_reads, 0);
        atomic_long_set(&pblk->cache_reads, 0);
        atomic_long_set(&pblk->sync_reads, 0);
        atomic_long_set(&pblk->recov_writes, 0);
        atomic_long_set(&pblk->recov_gc_writes, 0);
        atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

        atomic_long_set(&pblk->read_failed, 0);
        atomic_long_set(&pblk->read_empty, 0);
        atomic_long_set(&pblk->read_high_ecc, 0);
        atomic_long_set(&pblk->read_failed_gc, 0);
        atomic_long_set(&pblk->write_failed, 0);
        atomic_long_set(&pblk->erase_failed, 0);

        ret = pblk_luns_init(pblk, dev->luns);
        if (ret) {
                pr_err("pblk: could not initialize luns\n");
                goto fail;
        }

        ret = pblk_lines_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize lines\n");
                goto fail_free_luns;
        }

        pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t),
                                 GFP_KERNEL);
        if (!pblk->pad_dist) {
                ret = -ENOMEM;
                goto fail_free_line_meta;
        }

        ret = pblk_core_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize core\n");
                goto fail_free_pad_dist;
        }

        ret = pblk_l2p_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize maps\n");
                goto fail_free_core;
        }

        ret = pblk_lines_configure(pblk, flags);
        if (ret) {
                pr_err("pblk: could not configure lines\n");
                goto fail_free_l2p;
        }

        ret = pblk_writer_init(pblk);
        if (ret) {
                if (ret != -EINTR)
                        pr_err("pblk: could not initialize write thread\n");
                goto fail_free_lines;
        }

        ret = pblk_gc_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize gc\n");
                goto fail_stop_writer;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        blk_queue_write_cache(tqueue, true, false);

        tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

        pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        tdisk->disk_name,
                        geo->all_luns, pblk->l_mg.nr_lines,
                        (unsigned long long)pblk->rl.nr_secs,
                        pblk->rwb.nr_entries);

        wake_up_process(pblk->writer_ts);

        /* Check if we need to start GC */
        pblk_gc_should_kick(pblk);

        return pblk;

fail_stop_writer:
        pblk_writer_stop(pblk);
fail_free_lines:
        pblk_lines_free(pblk);
fail_free_l2p:
        pblk_l2p_free(pblk);
fail_free_core:
        pblk_core_free(pblk);
fail_free_pad_dist:
        kfree(pblk->pad_dist);
fail_free_line_meta:
        pblk_line_mg_free(pblk);
fail_free_luns:
        pblk_luns_free(pblk);
fail:
        kfree(pblk);
        return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
        .name           = "pblk",
        .version        = {1, 0, 0},

        .make_rq        = pblk_make_rq,
        .capacity       = pblk_capacity,

        .init           = pblk_init,
        .exit           = pblk_exit,

        .sysfs_init     = pblk_sysfs_init,
        .sysfs_exit     = pblk_sysfs_exit,
        .owner          = THIS_MODULE,
};

static int __init pblk_module_init(void)
{
        int ret;

        pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!pblk_bio_set)
                return -ENOMEM;

        ret = nvm_register_tgt_type(&tt_pblk);
        if (ret)
                bioset_free(pblk_bio_set);

        return ret;
}

static void pblk_module_exit(void)
{
        bioset_free(pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");