/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

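/*
 * Vendor-specific admin opcodes from the open-channel SSD 1.2
 * specification: device identification and bad block table access.
 */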
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

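/*
 * Wire formats for the LightNVM commands. Each mirrors the 64-byte
 * NVMe submission queue entry, so a struct nvme_nvm_command can be
 * cast to struct nvme_command for submission; _nvme_nvm_check_size()
 * below enforces the layout at build time.
 */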
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd11[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

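/*
 * The 886 pairing entries size struct nvme_nvm_lp_tbl at 896 bytes,
 * which pads struct nvme_nvm_id_group to its fixed 960-byte size (see
 * _nvme_nvm_check_size() below).
 */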
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_chk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

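/*
 * Translate the 1.2 identify geometry into the generic nvm_id_group.
 * The chunk size in sectors (clba) is derived from the flash geometry;
 * with illustrative values of a 16384-byte flash page, 4096-byte
 * sectors, four planes and 512 pages per block:
 *
 *	sec_per_pg = 16384 / 4096	=    4
 *	sec_per_pl = 4 * 4		=   16
 *	clba	   = 16 * 512		= 8192 sectors per chunk
 */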
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *grp;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (nvme_nvm_id->cgrps != 1)
		return -EINVAL;

	src = &nvme_nvm_id->groups[0];
	grp = &nvm_id->grp;

	grp->mtype = src->mtype;
	grp->fmtype = src->fmtype;

	grp->num_ch = src->num_ch;
	grp->num_lun = src->num_lun;

	grp->num_chk = le16_to_cpu(src->num_chk);
	grp->csecs = le16_to_cpu(src->csecs);
	grp->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	grp->clba = sec_per_pl * pg_per_blk;
	grp->ws_per_chk = pg_per_blk;

	grp->mpos = le32_to_cpu(src->mpos);
	grp->cpar = le16_to_cpu(src->cpar);
	grp->mccap = le32_to_cpu(src->mccap);

	grp->ws_opt = grp->ws_min = sec_per_pg;
	grp->ws_seq = NVM_IO_SNGL_ACCESS;

	if (grp->mpos & 0x020202) {
		grp->ws_seq = NVM_IO_DUAL_ACCESS;
		grp->ws_opt <<= 1;
	} else if (grp->mpos & 0x040404) {
		grp->ws_seq = NVM_IO_QUAD_ACCESS;
		grp->ws_opt <<= 2;
	}

	grp->trdt = le32_to_cpu(src->trdt);
	grp->trdm = le32_to_cpu(src->trdm);
	grp->tprt = le32_to_cpu(src->tprt);
	grp->tprm = le32_to_cpu(src->tprm);
	grp->tbet = le32_to_cpu(src->tbet);
	grp->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	grp->num_pln = src->num_pln;
	grp->num_pg = le16_to_cpu(src->num_pg);
	grp->fpg_sz = le16_to_cpu(src->fpg_sz);

	return 0;
}

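/*
 * Issue the vendor identify admin command and translate the result
 * into the generic nvm_id that the LightNVM core consumes.
 */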
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}

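/*
 * Read the bad block table for the LUN addressed by ppa: one state
 * byte per block across all planes, copied to blks once the "BBLT"
 * signature, version and block count have been validated.
 */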
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->nr_chks * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
				"bbt unexpected blocks returned (%u!=%u)\n",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}

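/*
 * Mark nr_ppas blocks starting at ppas with the given bad block state.
 * nlb is zero-based on the wire, hence nr_ppas - 1.
 */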
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}

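/* Translate a generic nvm_rq into a vendor read/write command. */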
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio) {
		blk_init_request_from_bio(rq, rqd->bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	return rq;
}

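/*
 * Asynchronous I/O path. The command is heap-allocated because it must
 * outlive this function; nvme_nvm_end_io() frees both the command and
 * the request on completion.
 */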
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

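/*
 * Synchronous variant of the I/O path; the command can live on the
 * stack because blk_execute_rq() does not return until completion.
 */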
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));

	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/*
	 * I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;

	blk_mq_free_request(rq);

	return ret;
}

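/*
 * PPA lists and out-of-band metadata are handed to the controller by
 * bus address, so they are allocated from a PAGE_SIZE DMA pool.
 */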
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
							dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.submit_io_sync		= nvme_nvm_submit_io_sync,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};

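/*
 * Common worker for the LightNVM ioctls: maps the user data buffer,
 * copies in the optional PPA list and metadata through the device DMA
 * pool, executes the command synchronously and copies results back.
 */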
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata, meta_buf,
								meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (metadata)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_list)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}

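/*
 * NVME_NVM_IOCTL_SUBMIT_VIO: vectored I/O from user space. nppas is
 * zero-based on the wire, so the data buffer spans nppas + 1 sectors.
 */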
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}

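/*
 * Vendor passthrough for admin and I/O commands. Only the get bad
 * block table opcode (0xf2) is permitted without CAP_SYS_ADMIN.
 */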
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xf2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns, &c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}

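/* Entry point for the LightNVM ioctls, called from the NVMe core. */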
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

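/*
 * Allocate a LightNVM device on top of the namespace's request queue
 * and register it with the LightNVM subsystem.
 */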
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}

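/*
 * One show routine serves every read-only attribute exposed under
 * /sys/block/<disk>/lightnvm/, dispatching on the attribute name.
 */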
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}

#define NVM_DEV_ATTR_RO(_name)						\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,
	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}