/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
        int ch_off;
        int num_lun;
        int *lun_offs;
};

struct nvm_dev_map {
        struct nvm_ch_map *chnls;
        int num_ch;
};

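/*
 * Illustrative example (added commentary; the values are assumptions, not
 * from the original source): a target created over luns 4..11 of a device
 * with geo.num_lun == 4 spans physical channels 1 and 2 and gets:
 *
 *     dev_map->num_ch = 2
 *     dev_map->chnls[0] = { .ch_off = 1, .num_lun = 4, .lun_offs = {0,0,0,0} }
 *     dev_map->chnls[1] = { .ch_off = 1, .num_lun = 4, .lun_offs = {0,0,0,0} }
 *
 * so target address (ch, lun) maps to physical (ch + ch_off,
 * lun + lun_offs[lun]); see nvm_map_to_dev() below.
 */
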
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
        struct nvm_target *tgt;

        list_for_each_entry(tgt, &dev->targets, list)
                if (!strcmp(name, tgt->disk->disk_name))
                        return tgt;

        return NULL;
}

static bool nvm_target_exists(const char *name)
{
        struct nvm_dev *dev;
        struct nvm_target *tgt;
        bool ret = false;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                list_for_each_entry(tgt, &dev->targets, list) {
                        if (!strcmp(name, tgt->disk->disk_name)) {
                                ret = true;
                                mutex_unlock(&dev->mlock);
                                goto out;
                        }
                }
                mutex_unlock(&dev->mlock);
        }
out:
        up_write(&nvm_lock);
        return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++) {
                if (test_and_set_bit(i, dev->lun_map)) {
                        pr_err("nvm: lun %d already allocated\n", i);
                        goto err;
                }
        }

        return 0;
err:
        while (--i >= lun_begin)
                clear_bit(i, dev->lun_map);

        return -EBUSY;
}

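/*
 * Note on the rollback above: reservation is all-or-nothing. If any lun in
 * [lun_begin, lun_end] is already taken, the bits set so far are cleared
 * again before returning, e.g. a request for luns 0..3 that collides on
 * lun 2 clears bits 0 and 1 and leaves the map unchanged.
 */
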
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
                                 int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++)
                WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_map = tgt_dev->map;
        int i, j;

        for (i = 0; i < dev_map->num_ch; i++) {
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs = ch_map->lun_offs;
                int ch = i + ch_map->ch_off;

                if (clear) {
                        for (j = 0; j < ch_map->num_lun; j++) {
                                int lun = j + lun_offs[j];
                                int lunid = (ch * dev->geo.num_lun) + lun;

                                WARN_ON(!test_and_clear_bit(lunid,
                                                        dev->lun_map));
                        }
                }

                kfree(ch_map->lun_offs);
        }

        kfree(dev_map->chnls);
        kfree(dev_map);

        kfree(tgt_dev->luns);
        kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
                                              u16 lun_begin, u16 lun_end,
                                              u16 op)
{
        struct nvm_tgt_dev *tgt_dev = NULL;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_dev_map *dev_map;
        struct ppa_addr *luns;
        int num_lun = lun_end - lun_begin + 1;
        int luns_left = num_lun;
        int num_ch = num_lun / dev->geo.num_lun;
        int num_ch_mod = num_lun % dev->geo.num_lun;
        int bch = lun_begin / dev->geo.num_lun;
        int blun = lun_begin % dev->geo.num_lun;
        int lunid = 0;
        int lun_balanced = 1;
        int sec_per_lun, prev_num_lun;
        int i, j;

        num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

        dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!dev_map)
                goto err_dev;

        dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
        if (!dev_map->chnls)
                goto err_chnls;

        luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!luns)
                goto err_luns;

        prev_num_lun = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;
        for (i = 0; i < num_ch; i++) {
                struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
                int *lun_roffs = ch_rmap->lun_offs;
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs;
                int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;

                if (lun_balanced && prev_num_lun != luns_in_chnl)
                        lun_balanced = 0;

                ch_map->ch_off = ch_rmap->ch_off = bch;
                ch_map->num_lun = luns_in_chnl;

                lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_offs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++) {
                        luns[lunid].ppa = 0;
                        luns[lunid].a.ch = i;
                        luns[lunid++].a.lun = j;

                        lun_offs[j] = blun;
                        lun_roffs[j + blun] = blun;
                }

                ch_map->lun_offs = lun_offs;

                /* when starting a new channel, lun offset is reset */
                blun = 0;
                luns_left -= luns_in_chnl;
        }

        dev_map->num_ch = num_ch;

        tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
        if (!tgt_dev)
                goto err_ch;

        /* Inherit device geometry from parent */
        memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

        /* Target device only owns a portion of the physical device */
        tgt_dev->geo.num_ch = num_ch;
        tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
        tgt_dev->geo.all_luns = num_lun;
        tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

        tgt_dev->geo.op = op;

        sec_per_lun = dev->geo.clba * dev->geo.num_chk;
        tgt_dev->geo.total_secs = num_lun * sec_per_lun;

        tgt_dev->q = dev->q;
        tgt_dev->map = dev_map;
        tgt_dev->luns = luns;
        tgt_dev->parent = dev;

        return tgt_dev;
err_ch:
        while (--i >= 0)
                kfree(dev_map->chnls[i].lun_offs);
        kfree(luns);
err_luns:
        kfree(dev_map->chnls);
err_chnls:
        kfree(dev_map);
err_dev:
        return tgt_dev;
}

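/*
 * Worked example for the arithmetic above (illustrative values, assuming
 * geo.num_lun == 8): creating a target over luns 8..23 gives
 *
 *     num_lun = 23 - 8 + 1 = 16
 *     num_ch  = 16 / 8 = 2, num_ch_mod = 16 % 8 = 0  ->  num_ch stays 2
 *     bch     = 8 / 8 = 1,  blun = 8 % 8 = 0
 *
 * i.e. a lun-balanced target spanning physical channels 1 and 2, with
 * tgt_dev->geo.num_lun = 8 and tgt_dev->geo.all_luns = 16. A non-multiple
 * of geo.num_lun would round num_ch up via num_ch_mod instead.
 */
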
static const struct block_device_operations nvm_fops = {
        .owner          = THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        list_for_each_entry(tt, &nvm_tgt_types, list)
                if (!strcmp(name, tt->name))
                        return tt;

        return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        down_write(&nvm_tgtt_lock);
        tt = __nvm_find_target_type(name);
        up_write(&nvm_tgtt_lock);

        return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
                                 int lun_end)
{
        if (lun_begin > lun_end || lun_end >= geo->all_luns) {
                pr_err("nvm: lun out of bound (%u:%u > %u)\n",
                        lun_begin, lun_end, geo->all_luns - 1);
                return -EINVAL;
        }

        return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
                               struct nvm_ioctl_create_simple *s)
{
        struct nvm_geo *geo = &dev->geo;

        if (s->lun_begin == -1 && s->lun_end == -1) {
                s->lun_begin = 0;
                s->lun_end = geo->all_luns - 1;
        }

        return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
                                 struct nvm_ioctl_create_extended *e)
{
        if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
                e->lun_begin = 0x0;
                e->lun_end = dev->geo.all_luns - 1;
        }

        /* op not set falls into target's default */
        if (e->op == 0xFFFF) {
                e->op = NVM_TARGET_DEFAULT_OP;
        } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
                pr_err("nvm: invalid over provisioning value\n");
                return -EINVAL;
        }

        return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

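/*
 * Example of the sentinel handling above (illustrative, assuming a
 * 64-lun device): an extended create ioctl with lun_begin, lun_end and
 * op all left at 0xFFFF expands to the whole device with the target's
 * default over-provisioning:
 *
 *     e->lun_begin = 0, e->lun_end = 63, e->op = NVM_TARGET_DEFAULT_OP
 */
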
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_extended e;
        struct request_queue *tqueue;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        struct nvm_tgt_dev *tgt_dev;
        void *targetdata;
        int ret;

        switch (create->conf.type) {
        case NVM_CONFIG_TYPE_SIMPLE:
                ret = __nvm_config_simple(dev, &create->conf.s);
                if (ret)
                        return ret;
                e.lun_begin = create->conf.s.lun_begin;
                e.lun_end = create->conf.s.lun_end;
                e.op = NVM_TARGET_DEFAULT_OP;
                break;
        case NVM_CONFIG_TYPE_EXTENDED:
                ret = __nvm_config_extended(dev, &create->conf.e);
                if (ret)
                        return ret;
                e = create->conf.e;
                break;
        default:
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }

        tt = nvm_find_target_type(create->tgttype);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
                return -EINVAL;
        }

        if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
                pr_err("nvm: device is incompatible with target L2P type.\n");
                return -EINVAL;
        }

        if (nvm_target_exists(create->tgtname)) {
                pr_err("nvm: target name already exists (%s)\n",
                                                        create->tgtname);
                return -EINVAL;
        }

        ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
        if (ret)
                return ret;

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_reserve;
        }

        tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
        if (!tgt_dev) {
                pr_err("nvm: could not create target device\n");
                ret = -ENOMEM;
                goto err_t;
        }

        tdisk = alloc_disk(0);
        if (!tdisk) {
                ret = -ENOMEM;
                goto err_dev;
        }

        tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
        }
        blk_queue_make_request(tqueue, tt->make_rq);

        strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
        tdisk->flags = GENHD_FL_EXT_DEVT;
        tdisk->first_minor = 0;
        tdisk->fops = &nvm_fops;
        tdisk->queue = tqueue;

        targetdata = tt->init(tgt_dev, tdisk, create->flags);
        if (IS_ERR(targetdata)) {
                ret = PTR_ERR(targetdata);
                goto err_init;
        }

        tdisk->private_data = targetdata;
        tqueue->queuedata = targetdata;

        blk_queue_max_hw_sectors(tqueue,
                        (dev->geo.csecs >> 9) * NVM_MAX_VLBA);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
                ret = -ENOMEM;
                goto err_sysfs;
        }

        t->type = tt;
        t->disk = tdisk;
        t->dev = tgt_dev;

        mutex_lock(&dev->mlock);
        list_add_tail(&t->list, &dev->targets);
        mutex_unlock(&dev->mlock);

        __module_get(tt->owner);

        return 0;
err_sysfs:
        if (tt->exit)
                tt->exit(targetdata, true);
err_init:
        blk_cleanup_queue(tqueue);
        tdisk->queue = NULL;
err_disk:
        put_disk(tdisk);
err_dev:
        nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
        kfree(t);
err_reserve:
        nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
        return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;
        struct request_queue *q = tdisk->queue;

        del_gendisk(tdisk);
        blk_cleanup_queue(q);

        if (tt->sysfs_exit)
                tt->sysfs_exit(tdisk);

        if (tt->exit)
                tt->exit(tdisk->private_data, graceful);

        nvm_remove_tgt_dev(t->dev, 1);
        put_disk(tdisk);
        module_put(t->type->owner);

        list_del(&t->list);
        kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t;

        mutex_lock(&dev->mlock);
        t = nvm_find_target(dev, remove->tgtname);
        if (!t) {
                mutex_unlock(&dev->mlock);
                return 1;
        }
        __nvm_remove_target(t, true);
        mutex_unlock(&dev->mlock);

        return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap;
        int i, j;

        rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!rmap)
                goto err_rmap;

        rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
                                                                GFP_KERNEL);
        if (!rmap->chnls)
                goto err_chnls;

        for (i = 0; i < dev->geo.num_ch; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                int luns_in_chnl = dev->geo.num_lun;

                ch_rmap = &rmap->chnls[i];

                ch_rmap->ch_off = -1;
                ch_rmap->num_lun = luns_in_chnl;

                lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_roffs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++)
                        lun_roffs[j] = -1;

                ch_rmap->lun_offs = lun_roffs;
        }

        dev->rmap = rmap;

        return 0;
err_ch:
        while (--i >= 0)
                kfree(rmap->chnls[i].lun_offs);
err_chnls:
        kfree(rmap);
err_rmap:
        return -ENOMEM;
}

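/*
 * Note (added commentary): a freshly registered reverse map is fully
 * unmapped, with ch_off and every lun_offs entry set to -1. The real
 * offsets are filled in by nvm_create_tgt_dev() when a target claims
 * the channel.
 */
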
static void nvm_unregister_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap = dev->rmap;
        int i;

        for (i = 0; i < dev->geo.num_ch; i++)
                kfree(rmap->chnls[i].lun_offs);

        kfree(rmap->chnls);
        kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
        int lun_off = ch_map->lun_offs[p->a.lun];

        p->a.ch += ch_map->ch_off;
        p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
        int lun_roff = ch_rmap->lun_offs[p->a.lun];

        p->a.ch -= ch_rmap->ch_off;
        p->a.lun -= lun_roff;
}

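/*
 * Round-trip example (illustrative, assuming ch_off == 1 and zero lun
 * offsets): target address (ch 0, lun 3) maps to physical (ch 1, lun 3)
 * via nvm_map_to_dev(); nvm_map_to_tgt() subtracts the same offsets,
 * read back from the reverse map (dev->rmap), to recover (ch 0, lun 3).
 */
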
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                               struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
        }
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                               struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
                nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (rqd->nr_ppas == 1) {
                nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
                return;
        }

        nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (rqd->nr_ppas == 1) {
                nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
                return;
        }

        nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

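/*
 * Note (added commentary): a single-sector request carries its address
 * inline in rqd->ppa_addr, while a vectored request (nr_ppas > 1) uses
 * the DMA-able rqd->ppa_list; the two wrappers above dispatch on
 * nr_ppas so callers never need to distinguish the cases.
 */
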
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (__nvm_find_target_type(tt->name))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_tgtt_lock);
        list_del(&tt->list);
        up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                                dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                               const struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        plane_cnt = geo->pln_mode;
        rqd->nr_ppas *= plane_cnt;

        for (i = 0; i < nr_ppas; i++) {
                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        ppa = ppas[i];
                        ppa.g.pl = pl_idx;
                        rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                }
        }

        return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
                                 struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

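/*
 * Layout example for nvm_set_rqd_ppalist() (illustrative, assuming
 * nr_ppas == 2 and pln_mode == NVM_PLANE_DOUBLE, i.e. plane_cnt == 2):
 * the expanded list is grouped by plane, not by sector, using index
 * (pl_idx * nr_ppas) + i:
 *
 *     ppa_list[0] = ppas[0], pl 0        ppa_list[2] = ppas[0], pl 1
 *     ppa_list[1] = ppas[1], pl 0        ppa_list[3] = ppas[1], pl 1
 */
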
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
        int flags = 0;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;

        if (rqd->is_seq)
                flags |= geo->pln_mode >> 1;

        if (rqd->opcode == NVM_OP_PREAD)
                flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
        else if (rqd->opcode == NVM_OP_PWRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;
}

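/*
 * Example (illustrative): on a 1.2 device in dual-plane mode, a
 * sequential read (rqd->is_seq set, opcode NVM_OP_PREAD) yields
 *
 *     flags = (NVM_PLANE_DOUBLE >> 1) | NVM_IO_SCRAMBLE_ENABLE
 *                                     | NVM_IO_SUSPEND
 *
 * while 2.0 devices always take zero flags.
 */
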
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /* In case of error, fail with right address format */
        ret = dev->ops->submit_io(dev, rqd);
        if (ret)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io_sync)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /* In case of error, fail with right address format */
        ret = dev->ops->submit_io_sync(dev, rqd);
        nvm_rq_dev_to_tgt(tgt_dev, rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        /* Convert address space */
        if (tgt_dev)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io_sync)
                return -ENODEV;

        rqd->flags = nvm_set_flags(&dev->geo, rqd);

        return dev->ops->submit_io_sync(dev, rqd);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
        struct nvm_rq rqd = { NULL };
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
        int ret;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        bio_init(&bio, &bio_vec, 1);
        bio_add_page(&bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);

        rqd.bio = &bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.is_seq = 1;
        rqd.nr_ppas = 1;
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

        ret = nvm_submit_io_sync_raw(dev, &rqd);
        __free_page(page);
        if (ret)
                return ret;

        return rqd.error;
}

/*
 * Scans the first and last page of a 1.2 chunk to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
                             struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, pg, pl;

        /* sense first page */
        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) /* valid data */
                meta->state = NVM_CHK_ST_OPEN;
        else if (ret > 0) {
                /*
                 * If empty page, the chunk is free, else it is an
                 * actual io error. In that case, mark it offline.
                 */
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                        meta->state = NVM_CHK_ST_FREE;
                        return 0;
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        goto scan;
                default:
                        return -ret; /* other io error */
                }
        }

        /* sense last page */
        ppa.g.pg = geo->num_pg - 1;
        ppa.g.pl = geo->num_pln - 1;

        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) { /* Chunk fully written */
                meta->state = NVM_CHK_ST_CLOSED;
                meta->wp = geo->clba;
                return 0;
        } else if (ret > 0) {
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        break;
                default:
                        return -ret; /* other io error */
                }
        }

scan:
        /*
         * chunk is open, we scan sequentially to update the write pointer.
         * We make the assumption that targets write data across all planes
         * before moving to the next page.
         */
        for (pg = 0; pg < geo->num_pg; pg++) {
                for (pl = 0; pl < geo->num_pln; pl++) {
                        ppa.g.pg = pg;
                        ppa.g.pl = pl;

                        ret = nvm_bb_chunk_sense(dev, ppa);
                        if (ret < 0) /* io error */
                                return ret;
                        else if (ret == 0) {
                                meta->wp += geo->ws_min;
                        } else if (ret > 0) {
                                switch (ret) {
                                case NVM_RSP_ERR_EMPTYPAGE:
                                        return 0;
                                case NVM_RSP_ERR_FAILCRC:
                                case NVM_RSP_ERR_FAILECC:
                                case NVM_RSP_WARN_HIGHECC:
                                        meta->wp += geo->ws_min;
                                        break;
                                default:
                                        return -ret; /* other io error */
                                }
                        }
                }
        }

        return 0;
}

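/*
 * Example (illustrative): for a chunk whose first page holds valid data
 * and whose last page reads back NVM_RSP_ERR_EMPTYPAGE, the scan loop
 * advances meta->wp by geo->ws_min per written plane-page and stops at
 * the first empty page, leaving meta->state == NVM_CHK_ST_OPEN and
 * meta->wp at the recovered write pointer.
 */
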
/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. If not bad, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
                           u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, blk, pl, offset, blktype;

        for (blk = 0; blk < geo->num_chk; blk++) {
                offset = blk * geo->pln_mode;
                blktype = blks[offset];

                for (pl = 0; pl < geo->pln_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                ppa.g.blk = blk;

                meta->wp = 0;
                meta->type = NVM_CHK_TP_W_SEQ;
                meta->wi = 0;
                meta->slba = generic_to_dev_addr(dev, ppa).ppa;
                meta->cnlb = dev->geo.clba;

                if (blktype == NVM_BLK_T_FREE) {
                        ret = nvm_bb_chunk_scan(dev, ppa, meta);
                        if (ret)
                                return ret;
                } else {
                        meta->state = NVM_CHK_ST_OFFLINE;
                }

                meta++;
        }

        return 0;
}

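/*
 * Folding example (illustrative, assuming pln_mode == 2): per-plane
 * entries {NVM_BLK_T_FREE, NVM_BLK_T_BAD} for one chunk fold to
 * blktype = NVM_BLK_T_BAD, so the chunk is reported offline; a pair of
 * NVM_BLK_T_FREE entries folds to free and triggers a chunk scan to
 * recover the state and write pointer.
 */
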
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
                           int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int ch, lun, nr_blks;
        int ret = 0;

        ppa.ppa = slba;
        ppa = dev_to_generic_addr(dev, ppa);

        if (ppa.g.blk != 0)
                return -EINVAL;

        if ((nchks % geo->num_chk) != 0)
                return -EINVAL;

        nr_blks = geo->num_chk * geo->pln_mode;

        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
                for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
                        struct ppa_addr ppa_gen, ppa_dev;

                        if (!nchks)
                                goto done;

                        ppa_gen.ppa = 0;
                        ppa_gen.g.ch = ch;
                        ppa_gen.g.lun = lun;
                        ppa_dev = generic_to_dev_addr(dev, ppa_gen);

                        ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
                        if (ret)
                                goto done;

                        ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
                                                                        meta);
                        if (ret)
                                goto done;

                        meta += geo->num_chk;
                        nchks -= geo->num_chk;
                }
        }
done:
        kfree(blks);
        return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_dev *dev = tgt_dev->parent;

        nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

        if (dev->geo.version == NVM_OCSSD_SPEC_12)
                return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

        return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (dev->geo.version == NVM_OCSSD_SPEC_20)
                return 0;

        if (nr_ppas > NVM_MAX_VLBA) {
                pr_err("nvm: unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret;

        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
                               sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        INIT_LIST_HEAD(&dev->area_list);
        INIT_LIST_HEAD(&dev->targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        ret = nvm_register_map(dev);
        if (ret)
                goto err_fmtype;

        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
        if (!dev)
                return;

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        nvm_unregister_map(dev);
        kfree(dev->lun_map);
        kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (dev->ops->identity(dev)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
                        geo->major_ver_id, geo->minor_ver_id,
                        geo->vmnt);

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
                        dev->name, dev->geo.ws_min, dev->geo.ws_opt,
                        dev->geo.num_chk, dev->geo.all_luns,
                        dev->geo.num_ch);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
        return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
        int ret;

        if (!dev->q || !dev->ops)
                return -EINVAL;

        dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
        if (!dev->dma_pool) {
                pr_err("nvm: could not create dma pool\n");
                return -ENOMEM;
        }

        ret = nvm_init(dev);
        if (ret)
                goto err_init;

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
err_init:
        dev->ops->destroy_dma_pool(dev->dma_pool);
        return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
        struct nvm_target *t, *tmp;

        mutex_lock(&dev->mlock);
        list_for_each_entry_safe(t, tmp, &dev->targets, list) {
                if (t->dev->parent != dev)
                        continue;
                __nvm_remove_target(t, false);
        }
        mutex_unlock(&dev->mlock);

        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_tgtt_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_tgtt_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                strlcpy(info->devname, dev->name, sizeof(info->devname));

                /* kept for compatibility */
                info->bmversion[0] = 1;
                info->bmversion[1] = 0;
                info->bmversion[2] = 0;
                strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
                i++;

                if (i > 31) {
                        pr_err("nvm: max 31 devices can be reported.\n");
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
            create.conf.e.rsv != 0) {
                pr_err("nvm: reserved config field in use\n");
                return -EINVAL;
        }

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                __u32 flags = create.flags;

                /* Check for valid flags */
                if (flags & NVM_TARGET_FACTORY)
                        flags &= ~NVM_TARGET_FACTORY;

                if (flags) {
                        pr_err("nvm: flag not supported\n");
                        return -EINVAL;
                }
        }

        return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;
        struct nvm_dev *dev;
        int ret = 0;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        list_for_each_entry(dev, &nvm_devices, devices) {
                ret = nvm_remove_tgt(dev, &remove);
                if (!ret)
                        break;
        }

        return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_init init;

        if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
                return -EFAULT;

        if (init.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_factory fact;

        if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
                return -EFAULT;

        fact.dev[DISK_NAME_LEN - 1] = '\0';

        if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
                return -EINVAL;

        return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);