/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
        int ch_off;
        int num_lun;
        int *lun_offs;
};

struct nvm_dev_map {
        struct nvm_ch_map *chnls;
        int num_ch;
};
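
/*
 * Illustrative example (not part of the driver): on a parent device
 * with 2 channels x 4 LUNs, a target built over physical LUNs 5..6
 * (both in channel 1) sees a single virtual channel whose map holds:
 *
 *        ch_map->ch_off      = 1;        virtual ch 0  -> physical ch 1
 *        ch_map->num_lun     = 2;
 *        ch_map->lun_offs[0] = 1;        virtual lun 0 -> physical lun 1
 *        ch_map->lun_offs[1] = 1;        virtual lun 1 -> physical lun 2
 *
 * The device keeps a reverse map (dev->rmap) with the same offsets so
 * completions can be translated from physical back to target space.
 */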

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
        struct nvm_target *tgt;

        list_for_each_entry(tgt, &dev->targets, list)
                if (!strcmp(name, tgt->disk->disk_name))
                        return tgt;

        return NULL;
}

static bool nvm_target_exists(const char *name)
{
        struct nvm_dev *dev;
        struct nvm_target *tgt;
        bool ret = false;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                list_for_each_entry(tgt, &dev->targets, list) {
                        if (!strcmp(name, tgt->disk->disk_name)) {
                                ret = true;
                                mutex_unlock(&dev->mlock);
                                goto out;
                        }
                }
                mutex_unlock(&dev->mlock);
        }

out:
        up_write(&nvm_lock);
        return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++) {
                if (test_and_set_bit(i, dev->lun_map)) {
                        pr_err("nvm: lun %d already allocated\n", i);
                        goto err;
                }
        }

        return 0;
err:
        while (--i >= lun_begin)
                clear_bit(i, dev->lun_map);

        return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
                                 int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++)
                WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_map = tgt_dev->map;
        int i, j;

        for (i = 0; i < dev_map->num_ch; i++) {
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs = ch_map->lun_offs;
                int ch = i + ch_map->ch_off;

                if (clear) {
                        for (j = 0; j < ch_map->num_lun; j++) {
                                int lun = j + lun_offs[j];
                                int lunid = (ch * dev->geo.num_lun) + lun;

                                WARN_ON(!test_and_clear_bit(lunid,
                                                        dev->lun_map));
                        }
                }

                kfree(ch_map->lun_offs);
        }

        kfree(dev_map->chnls);
        kfree(dev_map);

        kfree(tgt_dev->luns);
        kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
                                              u16 lun_begin, u16 lun_end,
                                              u16 op)
{
        struct nvm_tgt_dev *tgt_dev = NULL;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_dev_map *dev_map;
        struct ppa_addr *luns;
        int num_lun = lun_end - lun_begin + 1;
        int luns_left = num_lun;
        int num_ch = num_lun / dev->geo.num_lun;
        int num_ch_mod = num_lun % dev->geo.num_lun;
        int bch = lun_begin / dev->geo.num_lun;
        int blun = lun_begin % dev->geo.num_lun;
        int lunid = 0;
        int lun_balanced = 1;
        int sec_per_lun, prev_num_lun;
        int i, j;

        num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

        dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!dev_map)
                goto err_dev;

        dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
        if (!dev_map->chnls)
                goto err_chnls;

        luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!luns)
                goto err_luns;

        prev_num_lun = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;
        for (i = 0; i < num_ch; i++) {
                struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
                int *lun_roffs = ch_rmap->lun_offs;
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs;
                int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;

                if (lun_balanced && prev_num_lun != luns_in_chnl)
                        lun_balanced = 0;

                ch_map->ch_off = ch_rmap->ch_off = bch;
                ch_map->num_lun = luns_in_chnl;

                lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_offs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++) {
                        luns[lunid].ppa = 0;
                        luns[lunid].a.ch = i;
                        luns[lunid++].a.lun = j;

                        lun_offs[j] = blun;
                        lun_roffs[j + blun] = blun;
                }

                ch_map->lun_offs = lun_offs;

                /* when starting a new channel, lun offset is reset */
                blun = 0;
                luns_left -= luns_in_chnl;
        }

        dev_map->num_ch = num_ch;

        tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
        if (!tgt_dev)
                goto err_ch;

        /* Inherit device geometry from parent */
        memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

        /* Target device only owns a portion of the physical device */
        tgt_dev->geo.num_ch = num_ch;
        tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
        tgt_dev->geo.all_luns = num_lun;
        tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

        tgt_dev->geo.op = op;

        sec_per_lun = dev->geo.clba * dev->geo.num_chk;
        tgt_dev->geo.total_secs = num_lun * sec_per_lun;

        tgt_dev->q = dev->q;
        tgt_dev->map = dev_map;
        tgt_dev->luns = luns;
        tgt_dev->parent = dev;

        return tgt_dev;
err_ch:
        while (--i >= 0)
                kfree(dev_map->chnls[i].lun_offs);
        kfree(luns);
err_luns:
        kfree(dev_map->chnls);
err_chnls:
        kfree(dev_map);
err_dev:
        return tgt_dev;
}
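
/*
 * Worked example of the partitioning above (illustrative only): for a
 * parent device with geo.num_lun = 4 and a channel-aligned range
 * lun_begin = 4, lun_end = 11:
 *
 *        num_lun = 8, bch = 4 / 4 = 1, blun = 4 % 4 = 0
 *        num_ch  = 8 / 4 = 2, num_ch_mod = 0 -> num_ch stays 2
 *
 * Virtual channel i maps to physical channel i + bch, so the target
 * sees channels 0..1 backed by physical channels 1..2 with four LUNs
 * each; lun_balanced stays 1 and tgt_dev->geo.num_lun becomes 4.
 */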

static const struct block_device_operations nvm_fops = {
        .owner          = THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        list_for_each_entry(tt, &nvm_tgt_types, list)
                if (!strcmp(name, tt->name))
                        return tt;

        return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        down_write(&nvm_tgtt_lock);
        tt = __nvm_find_target_type(name);
        up_write(&nvm_tgtt_lock);

        return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
                                 int lun_end)
{
        if (lun_begin > lun_end || lun_end >= geo->all_luns) {
                pr_err("nvm: lun out of bounds (%u:%u > %u)\n",
                        lun_begin, lun_end, geo->all_luns - 1);
                return -EINVAL;
        }

        return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
                               struct nvm_ioctl_create_simple *s)
{
        struct nvm_geo *geo = &dev->geo;

        if (s->lun_begin == -1 && s->lun_end == -1) {
                s->lun_begin = 0;
                s->lun_end = geo->all_luns - 1;
        }

        return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
                                 struct nvm_ioctl_create_extended *e)
{
        if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
                e->lun_begin = 0;
                e->lun_end = dev->geo.all_luns - 1;
        }

        /* op not set falls back to the target's default */
        if (e->op == 0xFFFF) {
                e->op = NVM_TARGET_DEFAULT_OP;
        } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
                pr_err("nvm: invalid over provisioning value\n");
                return -EINVAL;
        }

        return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}
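
/*
 * Resolution example (illustrative only): an extended create request
 * passing lun_begin = lun_end = 0xFFFF and op = 0xFFFF on a device
 * with all_luns = 8 resolves to lun_begin = 0, lun_end = 7 and
 * op = NVM_TARGET_DEFAULT_OP, i.e. "span every LUN with the target's
 * default over-provisioning".
 */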

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_extended e;
        struct request_queue *tqueue;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        struct nvm_tgt_dev *tgt_dev;
        void *targetdata;
        unsigned int mdts;
        int ret;

        switch (create->conf.type) {
        case NVM_CONFIG_TYPE_SIMPLE:
                ret = __nvm_config_simple(dev, &create->conf.s);
                if (ret)
                        return ret;

                e.lun_begin = create->conf.s.lun_begin;
                e.lun_end = create->conf.s.lun_end;
                e.op = NVM_TARGET_DEFAULT_OP;
                break;
        case NVM_CONFIG_TYPE_EXTENDED:
                ret = __nvm_config_extended(dev, &create->conf.e);
                if (ret)
                        return ret;

                e = create->conf.e;
                break;
        default:
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }

        tt = nvm_find_target_type(create->tgttype);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
                return -EINVAL;
        }

        if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
                pr_err("nvm: device is incompatible with target L2P type.\n");
                return -EINVAL;
        }

        if (nvm_target_exists(create->tgtname)) {
                pr_err("nvm: target name already exists (%s)\n",
                                                        create->tgtname);
                return -EINVAL;
        }

        ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
        if (ret)
                return ret;

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_reserve;
        }

        tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
        if (!tgt_dev) {
                pr_err("nvm: could not create target device\n");
                ret = -ENOMEM;
                goto err_t;
        }

        tdisk = alloc_disk(0);
        if (!tdisk) {
                ret = -ENOMEM;
                goto err_dev;
        }

        tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
        }
        blk_queue_make_request(tqueue, tt->make_rq);

        strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
        tdisk->flags = GENHD_FL_EXT_DEVT;
        tdisk->major = 0;
        tdisk->first_minor = 0;
        tdisk->fops = &nvm_fops;
        tdisk->queue = tqueue;

        targetdata = tt->init(tgt_dev, tdisk, create->flags);
        if (IS_ERR(targetdata)) {
                ret = PTR_ERR(targetdata);
                goto err_init;
        }

        tdisk->private_data = targetdata;
        tqueue->queuedata = targetdata;

        mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
        if (dev->geo.mdts) {
                mdts = min_t(u32, dev->geo.mdts,
                                (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
        }
        blk_queue_max_hw_sectors(tqueue, mdts);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
                ret = -ENOMEM;
                goto err_sysfs;
        }

        t->type = tt;
        t->disk = tdisk;
        t->dev = tgt_dev;

        mutex_lock(&dev->mlock);
        list_add_tail(&t->list, &dev->targets);
        mutex_unlock(&dev->mlock);

        __module_get(tt->owner);

        return 0;
err_sysfs:
        if (tt->exit)
                tt->exit(targetdata, true);
err_init:
        blk_cleanup_queue(tqueue);
        tdisk->queue = NULL;
err_disk:
        put_disk(tdisk);
err_dev:
        nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
        kfree(t);
err_reserve:
        nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
        return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;
        struct request_queue *q = tdisk->queue;

        del_gendisk(tdisk);
        blk_cleanup_queue(q);

        if (tt->sysfs_exit)
                tt->sysfs_exit(tdisk);

        if (tt->exit)
                tt->exit(tdisk->private_data, graceful);

        nvm_remove_tgt_dev(t->dev, 1);
        put_disk(tdisk);
        module_put(t->type->owner);

        list_del(&t->list);
        kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:     ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t = NULL;
        struct nvm_dev *dev;

        down_read(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                t = nvm_find_target(dev, remove->tgtname);
                if (t) {
                        mutex_unlock(&dev->mlock);
                        break;
                }
                mutex_unlock(&dev->mlock);
        }
        up_read(&nvm_lock);

        if (!t)
                return 1;

        __nvm_remove_target(t, true);
        kref_put(&dev->ref, nvm_free);

        return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap;
        int i, j;

        rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!rmap)
                goto err_rmap;

        rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
                                                                GFP_KERNEL);
        if (!rmap->chnls)
                goto err_chnls;

        for (i = 0; i < dev->geo.num_ch; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                int luns_in_chnl = dev->geo.num_lun;

                ch_rmap = &rmap->chnls[i];

                ch_rmap->ch_off = -1;
                ch_rmap->num_lun = luns_in_chnl;

                lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_roffs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++)
                        lun_roffs[j] = -1;

                ch_rmap->lun_offs = lun_roffs;
        }

        dev->rmap = rmap;

        return 0;
err_ch:
        while (--i >= 0)
                kfree(rmap->chnls[i].lun_offs);
        kfree(rmap->chnls);
err_chnls:
        kfree(rmap);
err_rmap:
        return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap = dev->rmap;
        int i;

        for (i = 0; i < dev->geo.num_ch; i++)
                kfree(rmap->chnls[i].lun_offs);

        kfree(rmap->chnls);
        kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
        int lun_off = ch_map->lun_offs[p->a.lun];

        p->a.ch += ch_map->ch_off;
        p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
        int lun_roff = ch_rmap->lun_offs[p->a.lun];

        p->a.ch -= ch_rmap->ch_off;
        p->a.lun -= lun_roff;
}
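
/*
 * Round-trip example for the pair above (illustrative only): with the
 * ch_off = 1, lun_offs[] = { 1, 1 } mapping sketched near the top of
 * this file, nvm_map_to_dev() turns target address (ch 0, lun 1) into
 * device address (ch 1, lun 2), and nvm_map_to_tgt() uses the reverse
 * map's offsets to undo exactly that translation.
 */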

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                                struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
        }
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                                struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
                nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (__nvm_find_target_type(tt->name))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_tgtt_lock);
        list_del(&tt->list);
        up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                                                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                                dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        plane_cnt = geo->pln_mode;
        rqd->nr_ppas *= plane_cnt;

        for (i = 0; i < nr_ppas; i++) {
                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        ppa = ppas[i];
                        ppa.g.pl = pl_idx;
                        rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                }
        }

        return 0;
}
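
/*
 * Layout sketch for the plane expansion above (illustrative only):
 * with nr_ppas = 2 and plane_cnt = 2, the DMA'able list is grouped by
 * plane rather than by address:
 *
 *        ppa_list[0] = ppas[0] on plane 0
 *        ppa_list[1] = ppas[1] on plane 0
 *        ppa_list[2] = ppas[0] on plane 1
 *        ppa_list[3] = ppas[1] on plane 1
 *
 * and rqd->nr_ppas doubles to 4 to cover both planes.
 */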

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
                        struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
        int flags = 0;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;

        if (rqd->is_seq)
                flags |= geo->pln_mode >> 1;

        if (rqd->opcode == NVM_OP_PREAD)
                flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
        else if (rqd->opcode == NVM_OP_PWRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;
}
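
/*
 * Example (illustrative only): on a 1.2 device in dual-plane mode, a
 * sequential read (rqd->is_seq set, opcode NVM_OP_PREAD) is issued
 * with
 *
 *        flags = (pln_mode >> 1) | NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND
 *
 * whereas on a 2.0 device the flags word is unused and stays 0.
 */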

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /* In case of error, fail with the right address format */
        ret = dev->ops->submit_io(dev, rqd);
        if (ret)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);
        return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io_sync)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /* In case of error, fail with the right address format */
        ret = dev->ops->submit_io_sync(dev, rqd);
        nvm_rq_dev_to_tgt(tgt_dev, rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        /* Convert address space */
        if (tgt_dev)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io_sync)
                return -ENODEV;

        rqd->flags = nvm_set_flags(&dev->geo, rqd);

        return dev->ops->submit_io_sync(dev, rqd);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
        struct nvm_rq rqd = { NULL };
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
        int ret;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        bio_init(&bio, &bio_vec, 1);
        bio_add_page(&bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);

        rqd.bio = &bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.is_seq = 1;
        rqd.nr_ppas = 1;
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

        ret = nvm_submit_io_sync_raw(dev, &rqd);
        /* free the page regardless of the outcome to avoid leaking it */
        __free_page(page);
        if (ret)
                return ret;

        return rqd.error;
}

/*
 * Senses a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
                             struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, pg, pl;

        /* sense first page */
        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) /* valid data */
                meta->state = NVM_CHK_ST_OPEN;
        else if (ret > 0) {
                /*
                 * An empty page means the chunk is free. Recoverable
                 * CRC/ECC errors mean the chunk is open; anything else
                 * is an actual io error.
                 */
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                        meta->state = NVM_CHK_ST_FREE;
                        return 0;
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        goto scan;
                default:
                        return -ret; /* other io error */
                }
        }

        /* sense last page */
        ppa.g.pg = geo->num_pg - 1;
        ppa.g.pl = geo->num_pln - 1;

        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) { /* Chunk fully written */
                meta->state = NVM_CHK_ST_CLOSED;
                meta->wp = geo->clba;
                return 0;
        } else if (ret > 0) {
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        break;
                default:
                        return -ret; /* other io error */
                }
        }

scan:
        /*
         * The chunk is open, so scan it sequentially to update the write
         * pointer. We assume that targets write data across all planes
         * before moving to the next page.
         */
        for (pg = 0; pg < geo->num_pg; pg++) {
                for (pl = 0; pl < geo->num_pln; pl++) {
                        ppa.g.pg = pg;
                        ppa.g.pl = pl;

                        ret = nvm_bb_chunk_sense(dev, ppa);
                        if (ret < 0) /* io error */
                                return ret;
                        else if (ret == 0) {
                                meta->wp += geo->ws_min;
                        } else if (ret > 0) {
                                switch (ret) {
                                case NVM_RSP_ERR_EMPTYPAGE:
                                        return 0;
                                case NVM_RSP_ERR_FAILCRC:
                                case NVM_RSP_ERR_FAILECC:
                                case NVM_RSP_WARN_HIGHECC:
                                        meta->wp += geo->ws_min;
                                        break;
                                default:
                                        return -ret; /* other io error */
                                }
                        }
                }
        }

        return 0;
}
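
/*
 * Write-pointer arithmetic for the scan above (illustrative only): on
 * a geometry with num_pln = 2 and ws_min = 4, a chunk whose first
 * three pages are fully written advances meta->wp by ws_min for each
 * of the 3 * 2 = 6 readable (page, plane) pairs, giving wp = 24
 * sectors before the first empty page ends the scan.
 */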

/*
 * Folds a bad block list from its per-plane representation into its
 * chunk representation.
 *
 * If any plane's state is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
                           u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, blk, pl, offset, blktype;

        for (blk = 0; blk < geo->num_chk; blk++) {
                offset = blk * geo->pln_mode;
                blktype = blks[offset];

                for (pl = 0; pl < geo->pln_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                ppa.g.blk = blk;

                meta->wp = 0;
                meta->type = NVM_CHK_TP_W_SEQ;
                meta->wi = 0;
                meta->slba = generic_to_dev_addr(dev, ppa).ppa;
                meta->cnlb = dev->geo.clba;

                if (blktype == NVM_BLK_T_FREE) {
                        ret = nvm_bb_chunk_scan(dev, ppa, meta);
                        if (ret)
                                return ret;
                } else {
                        meta->state = NVM_CHK_ST_OFFLINE;
                }

                meta++;
        }

        return 0;
}
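
/*
 * Folding example (illustrative only): in dual-plane mode chunk n is
 * backed by blks[2n] and blks[2n + 1]. If either plane reports
 * NVM_BLK_T_BAD or NVM_BLK_T_GRWN_BAD, the chunk is reported as
 * NVM_CHK_ST_OFFLINE; a fully NVM_BLK_T_FREE pair is instead scanned
 * to recover the chunk's real state and write pointer.
 */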

static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
                           int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int ch, lun, nr_blks;
        int ret = 0;

        ppa.ppa = slba;
        ppa = dev_to_generic_addr(dev, ppa);

        if (ppa.g.blk != 0)
                return -EINVAL;

        if ((nchks % geo->num_chk) != 0)
                return -EINVAL;

        nr_blks = geo->num_chk * geo->pln_mode;

        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
                for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
                        struct ppa_addr ppa_gen, ppa_dev;

                        if (!nchks)
                                goto done;

                        ppa_gen.ppa = 0;
                        ppa_gen.g.ch = ch;
                        ppa_gen.g.lun = lun;
                        ppa_dev = generic_to_dev_addr(dev, ppa_gen);

                        ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
                        if (ret)
                                goto done;

                        ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
                                                                        meta);
                        if (ret)
                                goto done;

                        meta += geo->num_chk;
                        nchks -= geo->num_chk;
                }
        }
done:
        kfree(blks);
        return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_dev *dev = tgt_dev->parent;

        nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

        if (dev->geo.version == NVM_OCSSD_SPEC_12)
                return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

        return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (dev->geo.version == NVM_OCSSD_SPEC_20)
                return 0;

        if (nr_ppas > NVM_MAX_VLBA) {
                pr_err("nvm: unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
        if (ret)
                return ret;

        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret;

        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        INIT_LIST_HEAD(&dev->area_list);
        INIT_LIST_HEAD(&dev->targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        ret = nvm_register_map(dev);
        if (ret)
                goto err_fmtype;

        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}

static void nvm_free(struct kref *ref)
{
        struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        if (dev->rmap)
                nvm_unregister_map(dev);

        kfree(dev->lun_map);
        kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (dev->ops->identity(dev)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
                                geo->major_ver_id, geo->minor_ver_id,
                                geo->vmnt);

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
                        dev->name, dev->geo.ws_min, dev->geo.ws_opt,
                        dev->geo.num_chk, dev->geo.all_luns,
                        dev->geo.num_ch);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
        struct nvm_dev *dev;

        dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
        if (dev)
                kref_init(&dev->ref);

        return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
        int ret, exp_pool_size;

        if (!dev->q || !dev->ops) {
                kref_put(&dev->ref, nvm_free);
                return -EINVAL;
        }

        ret = nvm_init(dev);
        if (ret) {
                kref_put(&dev->ref, nvm_free);
                return ret;
        }

        exp_pool_size = max_t(int, PAGE_SIZE,
                              (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
        exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

        dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
                                                  exp_pool_size);
        if (!dev->dma_pool) {
                pr_err("nvm: could not create dma pool\n");
                kref_put(&dev->ref, nvm_free);
                return -ENOMEM;
        }

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
}
EXPORT_SYMBOL(nvm_register);
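
/*
 * Sizing example for the DMA pool above (geo.sos value assumed): with
 * NVM_MAX_VLBA = 64 and 16 bytes of out-of-band metadata per sector,
 * 64 * (sizeof(u64) + 16) = 1536 bytes, which max_t() raises to
 * PAGE_SIZE and round_up() keeps page-aligned, so one "ppalist" pool
 * element can hold both a full PPA list and its per-sector metadata.
 */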

void nvm_unregister(struct nvm_dev *dev)
{
        struct nvm_target *t, *tmp;

        mutex_lock(&dev->mlock);
        list_for_each_entry_safe(t, tmp, &dev->targets, list) {
                if (t->dev->parent != dev)
                        continue;
                __nvm_remove_target(t, false);
                kref_put(&dev->ref, nvm_free);
        }
        mutex_unlock(&dev->mlock);

        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        int ret;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        kref_get(&dev->ref);
        ret = nvm_create_tgt(dev, create);
        if (ret)
                kref_put(&dev->ref, nvm_free);

        return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_tgtt_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strlcpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_tgtt_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                strlcpy(info->devname, dev->name, sizeof(info->devname));

                /* kept for compatibility */
                info->bmversion[0] = 1;
                info->bmversion[1] = 0;
                info->bmversion[2] = 0;
                strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
                i++;

                /* stop before overflowing the 31-entry info[] array */
                if (i >= 31) {
                        pr_err("nvm: max 31 devices can be reported.\n");
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
            create.conf.e.rsv != 0) {
                pr_err("nvm: reserved config field in use\n");
                return -EINVAL;
        }

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                __u32 flags = create.flags;

                /* Check for valid flags */
                if (flags & NVM_TARGET_FACTORY)
                        flags &= ~NVM_TARGET_FACTORY;

                if (flags) {
                        pr_err("nvm: flag not supported\n");
                        return -EINVAL;
                }
        }

        return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return nvm_remove_tgt(&remove);
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_init init;

        if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
                return -EFAULT;

        if (init.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_factory fact;

        if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
                return -EFAULT;

        fact.dev[DISK_NAME_LEN - 1] = '\0';

        if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
                return -EINVAL;

        return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);
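
/*
 * Userspace sketch (illustrative only, error handling omitted): the
 * control node registered above is driven via the NVM_* ioctls from
 * <linux/lightnvm.h>. Creating a pblk target named "mydev" over every
 * LUN of device nvme0n1 might look like:
 *
 *        struct nvm_ioctl_create c = { 0 };
 *        int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *        strcpy(c.dev, "nvme0n1");
 *        strcpy(c.tgttype, "pblk");
 *        strcpy(c.tgtname, "mydev");
 *        c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *        c.conf.s.lun_begin = c.conf.s.lun_end = -1;   // all LUNs
 *        ioctl(fd, NVM_DEV_CREATE, &c);
 */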