lightnvm: flatten nvm_id_group into nvm_id
drivers/nvme/host/lightnvm.c
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

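/*
 * Vendor-specific admin opcodes defined by the Open-Channel SSD 1.2
 * specification; these are not part of the base NVMe command set.
 */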
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __u32                   rsvd11[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_erase_blk erase;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
        };
};

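/*
 * Geometry descriptor for one configuration group in the 1.2 identify
 * page. This driver only supports devices reporting exactly one group
 * (see the cgrps check in init_grp()), which is why the group fields
 * can be flattened directly into struct nvm_id.
 */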
struct nvme_nvm_id12_grp {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_chk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[906];
} __packed;

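/*
 * Generic physical-page-address (PPA) format: each pair describes the
 * bit offset and length of one component inside a 64-bit address. For
 * illustration, a component is extracted roughly as
 *
 *      ch = (ppa >> ppaf->ch_offset) & ((1ULL << ppaf->ch_len) - 1);
 *
 * and the remaining components (lun, plane, block, page, sector)
 * follow the same pattern.
 */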
struct nvme_nvm_id12_addrf {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sect_offset;
        __u8                    sect_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id12 {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_id12_addrf ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id12_grp grp;
        __u8                    resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

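/*
 * Flatten the single 1.2 configuration group into the flat nvm_id
 * representation and derive the values the core expects, e.g. the
 * number of sectors per chunk:
 *
 *      clba = (fpg_sz / csecs) * num_pln * num_pg
 */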
static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
{
        struct nvme_nvm_id12_grp *src;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (id12->cgrps != 1)
                return -EINVAL;

        src = &id12->grp;

        nvm_id->mtype = src->mtype;
        nvm_id->fmtype = src->fmtype;

        nvm_id->num_ch = src->num_ch;
        nvm_id->num_lun = src->num_lun;

        nvm_id->num_chk = le16_to_cpu(src->num_chk);
        nvm_id->csecs = le16_to_cpu(src->csecs);
        nvm_id->sos = le16_to_cpu(src->sos);

        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / nvm_id->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        nvm_id->clba = sec_per_pl * pg_per_blk;
        nvm_id->ws_per_chk = pg_per_blk;

        nvm_id->mpos = le32_to_cpu(src->mpos);
        nvm_id->cpar = le16_to_cpu(src->cpar);
        nvm_id->mccap = le32_to_cpu(src->mccap);

        nvm_id->ws_opt = nvm_id->ws_min = sec_per_pg;
        nvm_id->ws_seq = NVM_IO_SNGL_ACCESS;

        if (nvm_id->mpos & 0x020202) {
                nvm_id->ws_seq = NVM_IO_DUAL_ACCESS;
                nvm_id->ws_opt <<= 1;
        } else if (nvm_id->mpos & 0x040404) {
                nvm_id->ws_seq = NVM_IO_QUAD_ACCESS;
                nvm_id->ws_opt <<= 2;
        }

        nvm_id->trdt = le32_to_cpu(src->trdt);
        nvm_id->trdm = le32_to_cpu(src->trdm);
        nvm_id->tprt = le32_to_cpu(src->tprt);
        nvm_id->tprm = le32_to_cpu(src->tprm);
        nvm_id->tbet = le32_to_cpu(src->tbet);
        nvm_id->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        nvm_id->num_pln = src->num_pln;
        nvm_id->num_pg = le16_to_cpu(src->num_pg);
        nvm_id->fpg_sz = le16_to_cpu(src->fpg_sz);

        return 0;
}

static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id12 *id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);

        id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                id, sizeof(struct nvme_nvm_id12));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = id->ver_id;
        nvm_id->vmnt = id->vmnt;
        nvm_id->cap = le32_to_cpu(id->cap);
        nvm_id->dom = le32_to_cpu(id->dom);
        memcpy(&nvm_id->ppaf, &id->ppaf,
                                        sizeof(struct nvm_addr_format));

        ret = init_grp(nvm_id, id);
out:
        kfree(id);
        return ret;
}

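/*
 * Fetch the device bad block table. The table reports one state byte
 * per block, nr_chks * plane_mode entries in total, which is copied
 * verbatim into the caller's blks buffer.
 */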
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->nr_chks * geo->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u!=%u)",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
        kfree(bb_tbl);
        return ret;
}

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio) {
                blk_init_request_from_bio(rq, rqd->bio);
        } else {
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
                rq->__data_len = 0;
        }

        return rq;
}

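/*
 * Asynchronous submission path. The command must be heap-allocated
 * because it outlives this function; it is freed together with the
 * request in nvme_nvm_end_io() once the device completes the I/O.
 */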
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

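/*
 * Device operations handed to the lightnvm core via nvm_register() in
 * nvme_nvm_register() below; the core calls back through this table
 * for identify, bad-block management, I/O submission and DMA pool
 * handling.
 */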
static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,

        .max_phys_sect          = 64,
};

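/*
 * Common worker for the vector ioctls: maps the user data buffer onto
 * the request, and stages the optional PPA list and out-of-band
 * metadata in buffers from the device DMA pool before execution.
 */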
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

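/*
 * Handler for NVME_NVM_IOCTL_SUBMIT_VIO. A rough userspace sketch,
 * illustrative only (error handling and buffer setup omitted):
 *
 *      struct nvm_user_vio vio = { 0 };
 *
 *      vio.opcode = 0x92;                      // OCSSD 1.2 vector read
 *      vio.nppas = nr_ppas - 1;                // zero-based PPA count
 *      vio.ppa_list = (__u64)(uintptr_t)ppas;  // physical addresses
 *      vio.addr = (__u64)(uintptr_t)buf;       // data buffer
 *      ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio);
 */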
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

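/*
 * Called when a namespace is detected as a LightNVM device: allocates
 * the nvm_dev, points it at this namespace's request queue and ops
 * table, and registers it with the lightnvm core.
 */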
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

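/*
 * Sysfs attributes exposing the identify geometry, published under the
 * "lightnvm" group of the namespace block device (typically
 * /sys/block/<disk>/lightnvm/<attribute>).
 */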
static ssize_t nvm_dev_attr_show(struct device *dev,
                                 struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_id *id;
        struct attribute *attr;

        if (!ndev)
                return 0;

        id = &ndev->identity;
        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
        } else if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return scnprintf(page, PAGE_SIZE,
                        "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                        id->ppaf.ch_offset, id->ppaf.ch_len,
                        id->ppaf.lun_offset, id->ppaf.lun_len,
                        id->ppaf.pln_offset, id->ppaf.pln_len,
                        id->ppaf.blk_offset, id->ppaf.blk_len,
                        id->ppaf.pg_offset, id->ppaf.pg_len,
                        id->ppaf.sect_offset, id->ppaf.sect_len);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", id->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", id->sos);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n",
                                ndev->ops->max_phys_sect);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
                                 attr->name);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                          \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_vendor_opcode.attr,
        &dev_attr_capabilities.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,

        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,
        NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
        sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}