/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

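/*
 * Vendor-specific admin opcodes defined by the open-channel SSD 1.2
 * specification.
 */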
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

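/*
 * Physical page read/write command. @spba carries either a single PPA
 * or the DMA address of a PPA list, @length is the 0-based number of
 * PPAs, and @metadata points to the out-of-band area buffer.
 */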
struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __u32                   rsvd11[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_erase_blk erase;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
        };
};

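/*
 * 1.2 identify data: media geometry, timings and multi-plane
 * capabilities for a single configuration group.
 */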
struct nvme_nvm_id12_grp {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_chk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[906];
} __packed;

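/*
 * 1.2 PPA address format: bit offset and length of each address
 * component within the 64-bit physical address.
 */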
struct nvme_nvm_id12_addrf {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sect_offset;
        __u8                    sect_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id12 {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_id12_addrf ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id12_grp grp;
        __u8                    resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

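/*
 * Flatten the single 1.2 configuration group into the generic nvm_id
 * geometry. The chunk size in sectors (clba) is derived as
 * sectors-per-page * planes * pages-per-block.
 */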
static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
{
        struct nvme_nvm_id12_grp *src;
        struct nvm_id_group *grp;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (id12->cgrps != 1)
                return -EINVAL;

        src = &id12->grp;
        grp = &nvm_id->grp;

        grp->mtype = src->mtype;
        grp->fmtype = src->fmtype;

        grp->num_ch = src->num_ch;
        grp->num_lun = src->num_lun;

        grp->num_chk = le16_to_cpu(src->num_chk);
        grp->csecs = le16_to_cpu(src->csecs);
        grp->sos = le16_to_cpu(src->sos);

        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        grp->clba = sec_per_pl * pg_per_blk;
        grp->ws_per_chk = pg_per_blk;

        grp->mpos = le32_to_cpu(src->mpos);
        grp->cpar = le16_to_cpu(src->cpar);
        grp->mccap = le32_to_cpu(src->mccap);

        grp->ws_opt = grp->ws_min = sec_per_pg;
        grp->ws_seq = NVM_IO_SNGL_ACCESS;

        if (grp->mpos & 0x020202) {
                grp->ws_seq = NVM_IO_DUAL_ACCESS;
                grp->ws_opt <<= 1;
        } else if (grp->mpos & 0x040404) {
                grp->ws_seq = NVM_IO_QUAD_ACCESS;
                grp->ws_opt <<= 2;
        }

        grp->trdt = le32_to_cpu(src->trdt);
        grp->trdm = le32_to_cpu(src->trdm);
        grp->tprt = le32_to_cpu(src->tprt);
        grp->tprm = le32_to_cpu(src->tprm);
        grp->tbet = le32_to_cpu(src->tbet);
        grp->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        grp->num_pln = src->num_pln;
        grp->num_pg = le16_to_cpu(src->num_pg);
        grp->fpg_sz = le16_to_cpu(src->fpg_sz);

        return 0;
}

static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id12 *id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);

        id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                id, sizeof(struct nvme_nvm_id12));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = id->ver_id;
        nvm_id->vmnt = id->vmnt;
        nvm_id->cap = le32_to_cpu(id->cap);
        nvm_id->dom = le32_to_cpu(id->dom);
        memcpy(&nvm_id->ppaf, &id->ppaf,
                                        sizeof(struct nvm_addr_format));

        ret = init_grp(nvm_id, id);
out:
        kfree(id);
        return ret;
}

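/*
 * Fetch the bad block table for the LUN addressed by @ppa. The table
 * carries one state byte per block across all planes and must start
 * with the "BBLT" signature.
 */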
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->nr_chks * geo->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u!=%u)\n",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
        kfree(bb_tbl);
        return ret;
}

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

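/* Translate a generic nvm_rq into a 1.2 vector read/write command. */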
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

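/*
 * Completion handler for asynchronous vector I/O: propagate the per-PPA
 * completion bitmap and status to the caller, then free the command
 * allocated in nvme_nvm_submit_io().
 */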
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio) {
                blk_init_request_from_bio(rq, rqd->bio);
        } else {
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
                rq->__data_len = 0;
        }

        return rq;
}

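/*
 * Asynchronous submission: ownership of @cmd passes to the request and
 * it is released in nvme_nvm_end_io().
 */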
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /*
         * I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,

        .max_phys_sect          = 64,
};

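/*
 * Execute a vector command on behalf of user space: stage the PPA list
 * and optional metadata in DMA-able memory, map the data buffer, run
 * the request synchronously and copy status and metadata back out.
 */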
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

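/* Handle NVME_NVM_IOCTL_SUBMIT_VIO: a vector I/O issued from user space. */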
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

static ssize_t nvm_dev_attr_show(struct device *dev,
                                 struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_id *id;
        struct nvm_id_group *grp;
        struct attribute *attr;

        if (!ndev)
                return 0;

        id = &ndev->identity;
        grp = &id->grp;
        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
        } else if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return scnprintf(page, PAGE_SIZE,
                        "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                        id->ppaf.ch_offset, id->ppaf.ch_len,
                        id->ppaf.lun_offset, id->ppaf.lun_len,
                        id->ppaf.pln_offset, id->ppaf.pln_len,
                        id->ppaf.blk_offset, id->ppaf.blk_len,
                        id->ppaf.pg_offset, id->ppaf.pg_len,
                        id->ppaf.sect_offset, id->ppaf.sect_len);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n",
                                ndev->ops->max_phys_sect);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
                                 attr->name);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                          \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_vendor_opcode.attr,
        &dev_attr_capabilities.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,

        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,
        NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
        sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}