// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

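/*
 * Locking conventions in this file: nvm_lock guards the nvm_devices list,
 * nvm_tgtt_lock guards the list of registered target types, and each
 * device's dev->mlock guards that device's own list of targets.
 */
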
/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

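/*
 * Claim the LUN interval [lun_begin, lun_end] in the device bitmap,
 * rolling back already-taken bits on conflict so a failed target create
 * leaves nothing reserved.
 */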
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

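/*
 * Tear down the mapping built by nvm_create_tgt_dev(). When @clear is set,
 * the LUNs owned by the target are also returned to the device bitmap.
 */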
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

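/*
 * Carve a target device out of LUNs [lun_begin, lun_end] of the parent
 * device: build the per-channel LUN offset maps, the list of owned LUN
 * addresses, and a geometry restricted to the slice. @op is the
 * over-provisioning ratio for the target.
 */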
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

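/*
 * Validate a requested LUN interval against the device geometry before a
 * target is instantiated on top of it.
 */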
static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

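/*
 * Create a target over a LUN range: normalize the simple/extended ioctl
 * configurations, reserve the LUNs, build the target device and its
 * gendisk, then hand over to the target type's init routine.
 */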
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = blk_alloc_disk(dev->q->node);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->first_minor = 0;
	tdisk->fops = tt->bops;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tdisk->queue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tdisk->queue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;

	del_gendisk(tdisk);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	blk_cleanup_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

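/*
 * Build the device-wide reverse map used to translate physical addresses
 * back into target-relative ones. Offsets start out as -1 and are filled
 * in when a target claims the LUN in nvm_create_tgt_dev().
 */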
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

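/*
 * Translate addresses between a target's virtual (ch, lun) space and the
 * device's physical space: the forward map lives in tgt_dev->map, the
 * reverse map in dev->rmap.
 */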
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

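/*
 * Target types (e.g. pblk) register through nvm_register_tgt_type() and
 * nvm_unregister_tgt_type(), typically from their module init/exit hooks.
 * A minimal sketch, using a hypothetical "mytgt" target type:
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name    = "mytgt",
 *		.version = {1, 0, 0},
 *		// .init, .exit, .capacity, .bops, ... supplied by the target
 *	};
 *
 *	static int __init mytgt_init(void)
 *	{
 *		return nvm_register_tgt_type(&tt_mytgt);
 *	}
 */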
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

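/*
 * Compute the 1.2-style command flags for a request: plane-mode bits for
 * sequential accesses plus scrambler/suspend hints. 2.0 devices take no
 * flags, so 0 is returned for them.
 */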
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}

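/*
 * Probe the state of a 1.2 chunk by reading a single page. Returns 0 if
 * the page holds valid data, a positive device status (e.g. empty page)
 * otherwise, or a negative errno on submission failure.
 */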
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the planes status are bad or grown bad, the chunk is marked
 * offline. If not bad, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

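/*
 * Emulate the 2.0 report-chunk interface on a 1.2 device by walking the
 * per-LUN bad block tables and folding them into chunk metadata.
 */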
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}

done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

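/*
 * Allocate the per-device LUN bitmap and the bookkeeping structures shared
 * by all targets created on the device.
 */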
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	pr_warn_once("lightnvm support is deprecated and will be removed in Linux 5.15.\n");

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

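/*
 * A minimal sketch of the expected bring-up/tear-down sequence in a host
 * driver, assuming the driver fills in dev->q, dev->ops and dev->name
 * first (the ops table name below is hypothetical):
 *
 *	dev = nvm_alloc_dev(node);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->q = q;
 *	dev->ops = &my_nvm_dev_ops;
 *	strlcpy(dev->name, disk_name, DISK_NAME_LEN);
 *	ret = nvm_register(dev);
 *	...
 *	nvm_unregister(dev);
 */
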
void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("max %zd devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

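/* Control node exposed to userspace as /dev/lightnvm/control. */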
static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);