// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
#include "pblk-trace.h"
static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
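/* If set, write_buffer_size overrides the computed number of write buffer
 * entries, but only when it requests more than the device-derived minimum
 * (see pblk_rwb_init() below).
 */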
struct pblk_global_caches {
	struct kmem_cache	*ws;
	struct kmem_cache	*rec;
	struct kmem_cache	*g_rq;
	struct kmem_cache	*w_rq;

	struct kref		kref;

	struct mutex		mutex; /* Ensures consistency between
					* caches and kref
					*/
};

static struct pblk_global_caches pblk_caches = {
	.mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
	.kref = KREF_INIT(0),
};
struct bio_set pblk_bio_set;
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		pblk_submit_read(pblk, bio);
	} else {
		/* Prevent deadlock in the case of a modest LUN configuration
		 * and large user I/Os. Unless stalled, the rate limiter
		 * leaves at least 256KB available for user I/O.
		 */
		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
			blk_queue_split(q, &bio);

		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
	}

	return BLK_QC_T_NONE;
}
static size_t pblk_trans_map_size(struct pblk *pblk)
{
	int entry_size = 8;

	if (pblk->addrf_len < 32)
		entry_size = 4;

	return entry_size * pblk->capacity;
}
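/* Worked example (hypothetical geometry): a target whose address format fits
 * in under 32 bits uses compact 4-byte L2P entries, so an instance exposing
 * 100M 4KB sectors needs roughly 4 B * 100M = 400 MB of virtually contiguous
 * memory; with full 8-byte entries the table doubles to 800 MB.
 */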
#ifdef CONFIG_NVM_PBLK_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);
	return crc;
}
#endif
static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}
static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
	struct pblk_line *line = NULL;

	if (factory_init) {
		guid_gen(&pblk->instance_uuid);
	} else {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pblk_err(pblk, "could not recover l2p table\n");
			return -EFAULT;
		}
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line)
			return -EFAULT;
	}

	return 0;
}
static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;
	int ret = 0;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
				    __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
	if (!pblk->trans_map) {
		pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
				map_size);
		return -ENOMEM;
	}

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->capacity; i++)
		pblk_trans_map_set(pblk, i, ppa);

	ret = pblk_l2p_recover(pblk, factory_init);
	if (ret)
		vfree(pblk->trans_map);

	return ret;
}
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pblk_err(pblk, "write buffer error on tear down\n");

	pblk_rb_free(&pblk->rwb);
}
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	unsigned long buffer_size;
	int pgs_in_buffer, threshold;

	threshold = geo->mw_cunits * geo->all_luns;
	pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
								* geo->all_luns;

	if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
		buffer_size = write_buffer_size;
	else
		buffer_size = pgs_in_buffer;

	return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
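/* Sizing example (hypothetical geometry): with mw_cunits = 8, ws_opt = 8 and
 * 64 LUNs, the buffer gets (max(8, 8) + 8) * 64 = 1024 entries and a flush
 * threshold of 8 * 64 = 512 entries; the write_buffer_size module parameter
 * only takes effect when it requests more entries than this minimum.
 */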
static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
			     struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pblk_err(pblk, "supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pblk_err(pblk, "supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;

	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	dst->sec_offset = 0;
	dst->pln_offset = dst->sec_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}
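/* The resulting 1.2 PPA layout, from least to most significant bits, is
 * sector | plane | channel | LUN | page | block. The return value is the
 * total number of address bits, which the caller stores in pblk->addrf_len.
 */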
static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
			     struct pblk_addrf *udst)
{
	struct nvm_addrf *src = &geo->addrf;

	adst->ch_len = get_count_order(geo->num_ch);
	adst->lun_len = get_count_order(geo->num_lun);
	adst->chk_len = src->chk_len;
	adst->sec_len = src->sec_len;

	adst->sec_offset = 0;
	adst->ch_offset = adst->sec_len;
	adst->lun_offset = adst->ch_offset + adst->ch_len;
	adst->chk_offset = adst->lun_offset + adst->lun_len;

	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

	udst->sec_stripe = geo->ws_opt;
	udst->ch_stripe = geo->num_ch;
	udst->lun_stripe = geo->num_lun;

	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

	return adst->chk_offset + adst->chk_len;
}
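/* The 2.0 layout is sector | channel (group) | LUN (parallel unit) | chunk.
 * The pblk_addrf striping fields describe how user sectors are spread:
 * ws_opt sectors per LUN, first across all channels, then across all LUNs.
 */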
static int pblk_set_addrf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	u32 mod;

	switch (geo->version) {
	case NVM_OCSSD_SPEC_12:
		div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
		if (mod) {
			pblk_err(pblk, "bad configuration of sectors/pages\n");
			return -EINVAL;
		}

		pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
							(void *)&pblk->addrf);
		break;
	case NVM_OCSSD_SPEC_20:
		pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
							&pblk->uaddrf);
		break;
	default:
		pblk_err(pblk, "OCSSD revision not supported (%d)\n",
							geo->version);
		return -EINVAL;
	}

	return 0;
}
static int pblk_create_global_caches(void)
{
	pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_caches.ws)
		return -ENOMEM;

	pblk_caches.rec = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_caches.rec)
		goto fail_destroy_ws;

	pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_caches.g_rq)
		goto fail_destroy_rec;

	pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_caches.w_rq)
		goto fail_destroy_g_rq;

	return 0;

fail_destroy_g_rq:
	kmem_cache_destroy(pblk_caches.g_rq);
fail_destroy_rec:
	kmem_cache_destroy(pblk_caches.rec);
fail_destroy_ws:
	kmem_cache_destroy(pblk_caches.ws);

	return -ENOMEM;
}
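/* The global caches are shared by all pblk instances and reference counted:
 * the first pblk_get_global_caches() creates them and initializes the kref,
 * later calls only take a reference, and the last pblk_put_global_caches()
 * destroys them via pblk_destroy_global_caches().
 */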
static int pblk_get_global_caches(void)
{
	int ret = 0;

	mutex_lock(&pblk_caches.mutex);

	if (kref_get_unless_zero(&pblk_caches.kref))
		goto out;

	ret = pblk_create_global_caches();
	if (!ret)
		kref_init(&pblk_caches.kref);

out:
	mutex_unlock(&pblk_caches.mutex);
	return ret;
}
static void pblk_destroy_global_caches(struct kref *ref)
{
	struct pblk_global_caches *c;

	c = container_of(ref, struct pblk_global_caches, kref);

	kmem_cache_destroy(c->ws);
	kmem_cache_destroy(c->rec);
	kmem_cache_destroy(c->g_rq);
	kmem_cache_destroy(c->w_rq);
}
static void pblk_put_global_caches(void)
{
	mutex_lock(&pblk_caches.mutex);
	kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
	mutex_unlock(&pblk_caches.mutex);
}
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ret, max_write_ppas;

	atomic64_set(&pblk->user_wa, 0);
	atomic64_set(&pblk->pad_wa, 0);
	atomic64_set(&pblk->gc_wa, 0);
	pblk->user_rst_wa = 0;
	pblk->pad_rst_wa = 0;
	pblk->gc_rst_wa = 0;

	atomic64_set(&pblk->nr_flush, 0);
	pblk->nr_flush_rst = 0;

	pblk->min_write_pgs = geo->ws_opt;
	pblk->min_write_pgs_data = pblk->min_write_pgs;
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
		queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	pblk->oob_meta_size = geo->sos;
	if (!pblk_is_oob_meta_supported(pblk)) {
		/* For drives which do not have the OOB metadata feature, we
		 * need so-called packed metadata to support recovery. Packed
		 * metadata stores the same information as OOB metadata (the
		 * l2p table mapping), but in the form of a single page at the
		 * end of every write request.
		 */
		if (pblk->min_write_pgs
			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
			/* We want to keep all the packed metadata on a single
			 * page per write request, so ensure that it fits.
			 *
			 * This is more of a sanity check, since no device has
			 * such a big minimal write size (above 1 megabyte).
			 */
			pblk_err(pblk, "Not supported min write size\n");
			return -EINVAL;
		}
		/* The packed metadata approach simplifies the read path: we
		 * always issue requests of size max_write_pgs, with all pages
		 * carrying user payload except the last one, which holds the
		 * packed metadata.
		 */
		pblk->max_write_pgs = pblk->min_write_pgs;
		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
	}

	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
								GFP_KERNEL);
	if (!pblk->pad_dist)
		return -ENOMEM;

	if (pblk_get_global_caches())
		goto fail_free_pad_dist;

	/* Internal bios can be at most the sectors signaled by the device. */
	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
	if (ret)
		goto free_global_caches;

	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
				     pblk_caches.ws);
	if (ret)
		goto free_page_bio_pool;

	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
				     pblk_caches.rec);
	if (ret)
		goto free_gen_ws_pool;

	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_rec_pool;

	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_r_rq_pool;

	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
				     pblk_caches.w_rq);
	if (ret)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_addrf(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	INIT_LIST_HEAD(&pblk->resubmit_list);

	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
	mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
	mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
	mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
	mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_exit(&pblk->page_bio_pool);
free_global_caches:
	pblk_put_global_caches();
fail_free_pad_dist:
	kfree(pblk->pad_dist);
	return -ENOMEM;
}
static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_exit(&pblk->page_bio_pool);
	mempool_exit(&pblk->gen_ws_pool);
	mempool_exit(&pblk->rec_pool);
	mempool_exit(&pblk->r_rq_pool);
	mempool_exit(&pblk->e_rq_pool);
	mempool_exit(&pblk->w_rq_pool);

	pblk_put_global_caches();
	kfree(pblk->pad_dist);
}
static void pblk_line_mg_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		kvfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
	kmem_cache_destroy(l_mg->bitmap_cache);
}
static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
				struct pblk_line *line)
{
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
	kfree(line->chks);

	kvfree(w_err_gc->lba_list);
	kfree(w_err_gc);
}
static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(line);
		pblk_line_meta_free(l_mg, line);
	}

	pblk_line_mg_free(pblk);
	kfree(pblk->luns);
	kfree(pblk->lines);
}
static int pblk_luns_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i;

	/* TODO: Implement unbalanced LUN support */
	if (geo->num_lun < 0) {
		pblk_err(pblk, "unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
								GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->all_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->num_ch;
		int lun_raw = i / geo->num_ch;
		int lunid = lun_raw + ch * geo->num_lun;

		rlun = &pblk->luns[i];
		rlun->bppa = dev->luns[lunid];

		sema_init(&rlun->wr_sem, 1);
	}

	return 0;
}
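/* Striping example (hypothetical geometry): with num_ch = 4 and num_lun = 8,
 * i = 5 yields ch = 1, lun_raw = 1 and lunid = 1 + 1 * 8 = 9, so consecutive
 * pblk->luns entries sit on different channels and writes stripe across
 * channels first.
 */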
/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len +
			sizeof(struct wa_counters), geo->csecs);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->csecs);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->csecs);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
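/* Over-provisioning example (hypothetical numbers): with nr_free_chks = 1000
 * and an 11% OP, provisioned = 1000 * 89 / 100 = 890 chunks serve user data
 * and pblk->op_blks = 110 chunks stay reserved as GC headroom.
 */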
static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;
	int sec_meta, blk_meta, clba;
	int minimum;

	if (geo->op == NVM_TARGET_DEFAULT_OP)
		pblk->op = PBLK_DEFAULT_OP;
	else
		pblk->op = geo->op;

	minimum = pblk_get_min_chks(pblk);
	provisioned = nr_free_chks;
	provisioned *= (100 - pblk->op);
	sector_div(provisioned, 100);

	if ((nr_free_chks - provisioned) < minimum) {
		if (geo->op != NVM_TARGET_DEFAULT_OP) {
			pblk_err(pblk, "OP too small to create a sane instance\n");
			return -EINTR;
		}

		/* If the user did not specify an OP value, and PBLK_DEFAULT_OP
		 * is not enough, calculate and set a sane value
		 */

		provisioned = nr_free_chks - minimum;
		pblk->op = (100 * minimum) / nr_free_chks;
		pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
				pblk->op);
	}

	pblk->op_blks = nr_free_chks - provisioned;

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_chks;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
	pblk->capacity = (provisioned - blk_meta) * clba;

	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
	atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);

	return 0;
}
static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
				   struct nvm_chk_meta *meta)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, nr_bad_chks = 0;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		struct nvm_chk_meta *chunk;
		struct nvm_chk_meta *chunk_meta;
		struct ppa_addr ppa;
		int pos;

		ppa = rlun->bppa;
		pos = pblk_ppa_to_pos(geo, ppa);
		chunk = &line->chks[pos];

		ppa.m.chk = line->id;
		chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);

		chunk->state = chunk_meta->state;
		chunk->type = chunk_meta->type;
		chunk->wi = chunk_meta->wi;
		chunk->slba = chunk_meta->slba;
		chunk->cnlb = chunk_meta->cnlb;
		chunk->wp = chunk_meta->wp;

		trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
					chunk->state);

		if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
			WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
			continue;
		}

		if (!(chunk->state & NVM_CHK_ST_OFFLINE))
			continue;

		set_bit(pos, line->blk_bitmap);
		nr_bad_chks++;
	}

	return nr_bad_chks;
}
static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
				 void *chunk_meta, int line_id)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	long nr_bad_chks, chk_in_line;

	line->pblk = pblk;
	line->id = line_id;
	line->type = PBLK_LINETYPE_FREE;
	line->state = PBLK_LINESTATE_NEW;
	line->gc_group = PBLK_LINEGC_NONE;
	line->vsc = &l_mg->vsc_list[line_id];
	spin_lock_init(&line->lock);

	nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);

	chk_in_line = lm->blk_per_line - nr_bad_chks;
	if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
					chk_in_line < lm->min_blk_line) {
		line->state = PBLK_LINESTATE_BAD;
		list_add_tail(&line->list, &l_mg->bad_list);
		return 0;
	}

	atomic_set(&line->blk_in_line, chk_in_line);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;

	return chk_in_line;
}
static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap)
		goto free_blk_bitmap;

	line->chks = kmalloc_array(lm->blk_per_line,
				   sizeof(struct nvm_chk_meta), GFP_KERNEL);
	if (!line->chks)
		goto free_erase_bitmap;

	line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
	if (!line->w_err_gc)
		goto free_chks;

	return 0;

free_chks:
	kfree(line->chks);
free_erase_bitmap:
	kfree(line->erase_bitmap);
free_blk_bitmap:
	kfree(line->blk_bitmap);

	return -ENOMEM;
}
static int pblk_line_mg_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, bb_distance;

	l_mg->nr_lines = geo->num_chk;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);
	INIT_LIST_HEAD(&l_mg->gc_werr_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_werr_list;
	l_mg->gc_lists[1] = &l_mg->gc_high_list;
	l_mg->gc_lists[2] = &l_mg->gc_mid_list;
	l_mg->gc_lists[3] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template)
		goto fail_free_vsc_list;

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux)
		goto fail_free_bb_template;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
			lm->sec_bitmap_len, 0, 0, NULL);
	if (!l_mg->bitmap_cache)
		goto fail_free_smeta;

	/* the bitmap pool is used for both valid and map bitmaps */
	l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
				l_mg->bitmap_cache);
	if (!l_mg->bitmap_pool)
		goto fail_destroy_bitmap_cache;

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
		if (!emeta->buf) {
			kfree(emeta);
			goto fail_free_emeta;
		}

		emeta->nr_entries = lm->emeta_sec[0];
		l_mg->eline_meta[i] = emeta;
	}

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	bb_distance = (geo->all_luns) * geo->ws_opt;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->ws_opt);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		kvfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
fail_destroy_bitmap_cache:
	kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_vsc_list:
	kfree(l_mg->vsc_list);
fail:
	return -ENOMEM;
}
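/* Sizing example (hypothetical geometry): with ws_opt = 8 and csecs = 4096,
 * each add_smeta_page iteration below grows smeta by 8 sectors (32 KB) until
 * the line_smeta header plus the LUN bitmap fit; emeta grows the same way
 * until the regions computed by calc_emeta_len() fit.
 */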
static int pblk_line_meta_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int smeta_len, emeta_len;
	int i;

	lm->sec_per_line = geo->clba * geo->all_luns;
	lm->blk_per_line = geo->all_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->ws_opt;
	lm->smeta_len = lm->smeta_sec * geo->csecs;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->ws_opt;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->all_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->clba);

	if (lm->min_blk_line > lm->blk_per_line) {
		pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		return -EINVAL;
	}

	return 0;
}
static int pblk_lines_init(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	void *chunk_meta;
	int nr_free_chks = 0;
	int i, ret;

	ret = pblk_line_meta_init(pblk);
	if (ret)
		return ret;

	ret = pblk_line_mg_init(pblk);
	if (ret)
		return ret;

	ret = pblk_luns_init(pblk);
	if (ret)
		goto fail_free_meta;

	chunk_meta = pblk_get_chunk_meta(pblk);
	if (IS_ERR(chunk_meta)) {
		ret = PTR_ERR(chunk_meta);
		goto fail_free_luns;
	}

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_chunk_meta;
	}

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		ret = pblk_alloc_line_meta(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);

		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
								line->state);
	}

	if (!nr_free_chks) {
		pblk_err(pblk, "too many bad blocks to create a sane instance\n");
		ret = -EINTR;
		goto fail_free_lines;
	}

	ret = pblk_set_provision(pblk, nr_free_chks);
	if (ret)
		goto fail_free_lines;

	vfree(chunk_meta);

	return 0;

fail_free_lines:
	while (--i >= 0)
		pblk_line_meta_free(l_mg, &pblk->lines[i]);
	kfree(pblk->lines);
fail_free_chunk_meta:
	vfree(chunk_meta);
fail_free_luns:
	kfree(pblk->luns);
fail_free_meta:
	pblk_line_mg_free(pblk);

	return ret;
}
static int pblk_writer_init(struct pblk *pblk)
{
	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		int err = PTR_ERR(pblk->writer_ts);

		if (err != -EINTR)
			pblk_err(pblk, "could not allocate writer kthread (%d)\n",
					err);
		return err;
	}

	timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	return 0;
}
static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	del_timer_sync(&pblk->wtimer);
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
}
static void pblk_free(struct pblk *pblk)
{
	pblk_lines_free(pblk);
	pblk_l2p_free(pblk);
	pblk_rwb_free(pblk);
	pblk_core_free(pblk);

	kfree(pblk);
}
static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
	if (graceful)
		__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}
static void pblk_exit(void *private, bool graceful)
{
	struct pblk *pblk = private;

	pblk_gc_exit(pblk, graceful);
	pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
}
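/* pblk accounts capacity in device sectors (typically 4 KB) internally, while
 * the block layer expects 512-byte sectors; NR_PHY_IN_LOG converts between
 * the two units below.
 */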
static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	pblk->gc.gc_enabled = 0;

	if (!(geo->version == NVM_OCSSD_SPEC_12 ||
					geo->version == NVM_OCSSD_SPEC_20)) {
		pblk_err(pblk, "OCSSD version not supported (%u)\n",
							geo->version);
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	if (geo->ext) {
		pblk_err(pblk, "extended metadata not supported\n");
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_init(&pblk->resubmit_lock);
	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_core_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize core\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize lines\n");
		goto fail_free_core;
	}

	ret = pblk_rwb_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize write buffer\n");
		goto fail_free_lines;
	}

	ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
	if (ret) {
		pblk_err(pblk, "could not initialize maps\n");
		goto fail_free_rwb;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		if (ret != -EINTR)
			pblk_err(pblk, "could not initialize write thread\n");
		goto fail_free_l2p;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->clba * geo->csecs;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->capacity,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_rwb:
	pblk_rwb_free(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}
/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};
static int __init pblk_module_init(void)
{
	int ret;

	ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret)
		return ret;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_exit(&pblk_bio_set);
	return ret;
}
static void pblk_module_exit(void)
{
	bioset_exit(&pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}
module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");