/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
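
/*
 * Admin opcodes defined by the LightNVM (Open-Channel SSD) 1.2
 * specification; they live in the vendor-specific range of the NVMe
 * admin command set.
 */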
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;
	__u32			rsvd11[5];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
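
/*
 * Every member of the union is exactly 64 bytes, the size of a generic
 * struct nvme_command; that is what makes the casts to struct nvme_command
 * throughout this file safe, and it is asserted at build time by
 * _nvme_nvm_check_size() below.
 */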

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_chk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *grp;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (nvme_nvm_id->cgrps != 1)
		return -EINVAL;

	src = &nvme_nvm_id->groups[0];
	grp = &nvm_id->grp;

	grp->mtype = src->mtype;
	grp->fmtype = src->fmtype;

	grp->num_ch = src->num_ch;
	grp->num_lun = src->num_lun;

	grp->num_chk = le16_to_cpu(src->num_chk);
	grp->csecs = le16_to_cpu(src->csecs);
	grp->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	grp->clba = sec_per_pl * pg_per_blk;
	grp->ws_per_chk = pg_per_blk;
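
	/*
	 * Worked example (hypothetical geometry, not from any particular
	 * device): a 16384-byte flash page with 4096-byte sectors gives
	 * sec_per_pg = 4; with 4 planes, sec_per_pl = 16; with 512 pages
	 * per block, clba = 16 * 512 = 8192 sectors per chunk.
	 */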

	grp->mpos = le32_to_cpu(src->mpos);
	grp->cpar = le16_to_cpu(src->cpar);
	grp->mccap = le32_to_cpu(src->mccap);

	grp->ws_opt = grp->ws_min = sec_per_pg;
	grp->ws_seq = NVM_IO_SNGL_ACCESS;
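
	/*
	 * mpos packs plane-mode capability bits for each operation type into
	 * separate bytes; the repeated-byte masks below test the dual-plane
	 * (0x02) and quad-plane (0x04) bits across all of them at once.
	 */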
	if (grp->mpos & 0x020202) {
		grp->ws_seq = NVM_IO_DUAL_ACCESS;
		grp->ws_opt <<= 1;
	} else if (grp->mpos & 0x040404) {
		grp->ws_seq = NVM_IO_QUAD_ACCESS;
		grp->ws_opt <<= 2;
	}

	grp->trdt = le32_to_cpu(src->trdt);
	grp->trdm = le32_to_cpu(src->trdm);
	grp->tprt = le32_to_cpu(src->tprt);
	grp->tprm = le32_to_cpu(src->tprm);
	grp->tbet = le32_to_cpu(src->tbet);
	grp->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	grp->num_pln = src->num_pln;
	grp->num_pg = le16_to_cpu(src->num_pg);
	grp->fpg_sz = le16_to_cpu(src->fpg_sz);

	return 0;
}

static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
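
/*
 * The identity data parsed above is what the lightnvm core uses to build
 * the device geometry at registration time; targets such as pblk consume
 * that geometry rather than the raw identify page.
 */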

static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->nr_chks * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
			"bbt unexpected blocks returned (%u!=%u)\n",
			le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}
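
/*
 * The table carries one state byte per block across all planes, so the
 * buffer is sized as the 64-byte header plus nr_chks * plane_mode bytes,
 * and exactly that many bytes are copied back to the caller.
 */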

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}

static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}
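
/*
 * Note that length follows the NVMe zero-based convention: a request for n
 * sectors is encoded as n - 1, and control forwards the lightnvm I/O flags
 * (plane-mode hints and the like) from rqd->flags unchanged.
 */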

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio) {
		blk_init_request_from_bio(rq, rqd->bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	return rq;
}

static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));

	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;

	blk_mq_free_request(rq);

	return ret;
}
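
/*
 * The two submission paths above differ only in completion handling:
 * nvme_nvm_submit_io() returns immediately and completes through
 * nvme_nvm_end_io(), while nvme_nvm_submit_io_sync() blocks in
 * blk_execute_rq() and harvests ppa_status/error inline.
 */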

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}
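
/*
 * These PAGE_SIZE DMA pools back PPA-list and out-of-band metadata buffers;
 * both the lightnvm core and the vectored user-command path further down
 * allocate from them through dev->dma_pool.
 */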

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity = nvme_nvm_identity,

	.get_bb_tbl = nvme_nvm_get_bb_tbl,
	.set_bb_tbl = nvme_nvm_set_bb_tbl,

	.submit_io = nvme_nvm_submit_io,
	.submit_io_sync = nvme_nvm_submit_io_sync,

	.create_dma_pool = nvme_nvm_create_dma_pool,
	.destroy_dma_pool = nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc = nvme_nvm_dev_dma_alloc,
	.dev_dma_free = nvme_nvm_dev_dma_free,

	.max_phys_sect = 64,
};
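
/*
 * The lightnvm core never calls into this driver directly; all device
 * access is dispatched through this ops table, which nvme_nvm_register()
 * below attaches to the nvm_dev.
 */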

static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
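
	/*
	 * spba is overloaded: a vectored command passes the bus address of a
	 * DMA-mapped PPA list, while a single-PPA command carries the PPA
	 * value itself inline.
	 */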
	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
							&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);
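
	/*
	 * nvme_req(rq)->status is the 16-bit NVMe completion status; the low
	 * 11 bits (0x7ff) are the status code and status code type, which is
	 * what gets reported back to userspace, while bits such as DNR and
	 * More are stripped.
	 */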
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}

err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}

static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
					vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
				struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
					vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
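
/*
 * Hypothetical userspace sketch (not part of this file): a vectored I/O is
 * issued through the ioctl as roughly
 *
 *	struct nvm_user_vio vio = { .opcode = ..., .nppas = n - 1, ... };
 *	ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio);
 *
 * with vio.addr, vio.metadata and vio.ppa_list pointing at user memory.
 */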

int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}

static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}

#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,

	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name = "lightnvm",
	.attrs = nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}
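
/*
 * With the attribute group named "lightnvm", these read-only attributes
 * appear under /sys/block/<disk>/lightnvm/ once the namespace has been
 * registered.
 */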

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}