/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
struct nvme_nvm_ph_rw {

struct nvme_nvm_erase_blk {

struct nvme_nvm_identity {

struct nvme_nvm_getbbtbl {

struct nvme_nvm_setbbtbl {
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_erase_blk erase;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
	};
};
struct nvme_nvm_id12_grp {

struct nvme_nvm_id12_addrf {

struct nvme_nvm_id12 {
	struct nvme_nvm_id12_addrf ppaf;
	struct nvme_nvm_id12_grp grp;

struct nvme_nvm_bb_tbl {
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}
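/*
 * BUILD_BUG_ON() fails the build when its condition is true, so any
 * change to the wire-format structs above is caught at compile time
 * rather than on the wire. A minimal sketch of the same idea in plain
 * C11 (the kernel macro predates _Static_assert):
 *
 *	_Static_assert(sizeof(struct nvme_nvm_ph_rw) == 64,
 *		       "vector r/w command must be a 64-byte NVMe SQE");
 */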
static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
{
	struct nvme_nvm_id12_grp *src;
	struct nvm_id_group *grp;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (id12->cgrps != 1)
		return -EINVAL;

	src = &id12->grp;
	grp = &nvm_id->grp;

	grp->mtype = src->mtype;
	grp->fmtype = src->fmtype;

	grp->num_ch = src->num_ch;
	grp->num_lun = src->num_lun;

	grp->num_chk = le16_to_cpu(src->num_chk);
	grp->csecs = le16_to_cpu(src->csecs);
	grp->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	grp->clba = sec_per_pl * pg_per_blk;
	grp->ws_per_chk = pg_per_blk;

	grp->mpos = le32_to_cpu(src->mpos);
	grp->cpar = le16_to_cpu(src->cpar);
	grp->mccap = le32_to_cpu(src->mccap);

	grp->ws_opt = grp->ws_min = sec_per_pg;
	grp->ws_seq = NVM_IO_SNGL_ACCESS;

	if (grp->mpos & 0x020202) {
		grp->ws_seq = NVM_IO_DUAL_ACCESS;
		grp->ws_opt <<= 1;
	} else if (grp->mpos & 0x040404) {
		grp->ws_seq = NVM_IO_QUAD_ACCESS;
		grp->ws_opt <<= 2;
	}

	grp->trdt = le32_to_cpu(src->trdt);
	grp->trdm = le32_to_cpu(src->trdm);
	grp->tprt = le32_to_cpu(src->tprt);
	grp->tprm = le32_to_cpu(src->tprm);
	grp->tbet = le32_to_cpu(src->tbet);
	grp->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	grp->num_pln = src->num_pln;
	grp->num_pg = le16_to_cpu(src->num_pg);
	grp->fpg_sz = le16_to_cpu(src->fpg_sz);

	return 0;
}
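/*
 * Worked example of the geometry math in init_grp() (illustrative
 * numbers, not from any particular device): with a 16384-byte flash
 * page (fpg_sz), 4096-byte sectors (csecs), 4 planes (num_pln) and
 * 512 pages per block (num_pg):
 *
 *	sec_per_pg = 16384 / 4096 = 4
 *	sec_per_pl = 4 * 4        = 16
 *	clba       = 16 * 512     = 8192 sectors per chunk
 */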
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id12 *id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);

	id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				id, sizeof(struct nvme_nvm_id12));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = id->ver_id;
	nvm_id->vmnt = id->vmnt;
	nvm_id->cap = le32_to_cpu(id->cap);
	nvm_id->dom = le32_to_cpu(id->dom);
	memcpy(&nvm_id->ppaf, &id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grp(nvm_id, id);
out:
	kfree(id);
	return ret;
}
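/*
 * Note on the (struct nvme_command *) casts used throughout: both
 * struct nvme_command and struct nvme_nvm_command describe a 64-byte
 * submission queue entry, and the union over struct nvme_common_command
 * keeps the shared header fields (opcode, flags, command_id, nsid) at
 * the same offsets; _nvme_nvm_check_size() pins the sizes.
 */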
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->nr_chks * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
				"bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}
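/*
 * Each byte of bb_tbl->blk[] describes one plane-block; the values are
 * the NVM_BLK_T_* states from <linux/lightnvm.h> (e.g. NVM_BLK_T_FREE,
 * NVM_BLK_T_BAD, NVM_BLK_T_GRWN_BAD). This driver copies them out
 * verbatim and leaves interpretation to the lightnvm core.
 */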
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}
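/*
 * For a single-sector request, rqd->ppa_addr.ppa holds the device
 * physical address itself; for vectored requests the core stores the
 * DMA address of a PPA list in the same union member. A hedged sketch
 * of building a device address with the generic bitfields from
 * <linux/lightnvm.h> (values illustrative):
 *
 *	struct ppa_addr p = { .ppa = 0 };
 *
 *	p.g.ch  = 0;		// channel
 *	p.g.lun = 1;		// LUN within the channel
 *	p.g.blk = 42;		// block within the LUN
 *	p.g.pg  = 7;		// page within the block
 *	rqd->ppa_addr = p;
 */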
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio) {
		blk_init_request_from_bio(rq, rqd->bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	return rq;
}
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;
	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
	return 0;
}
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));
	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;

	blk_mq_free_request(rq);
	return ret;
}
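/*
 * The two submission paths above differ only in completion handling:
 * nvme_nvm_submit_io() returns immediately and nvme_nvm_end_io() runs
 * from the block layer's completion context, while
 * nvme_nvm_submit_io_sync() blocks in blk_execute_rq() and harvests
 * ppa_status/error inline. Both report per-sector failures through
 * rqd rather than through the return value.
 */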
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.submit_io_sync		= nvme_nvm_submit_io_sync,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};
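/*
 * A hedged sketch of how the lightnvm core consumes this ops table
 * once nvm_register() has run (the real call sites live in
 * drivers/lightnvm/core.c; this snippet is illustrative only):
 *
 *	struct nvm_dev *dev = ns->ndev;
 *	struct nvm_rq rqd = { ... };
 *
 *	dev->ops->identity(dev, &dev->identity);
 *	dev->ops->submit_io(dev, &rqd);	  // lands in nvme_nvm_submit_io()
 */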
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
							&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}
			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}
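/*
 * The unwind labels above release resources in reverse order of
 * acquisition (metadata DMA buffer, mapped user pages, PPA DMA buffer,
 * request), the usual goto-based cleanup pattern for multi-step setup.
 */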
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
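/*
 * Hedged userspace sketch of the vectored-I/O ioctl (file descriptor,
 * buffer, opcode and PPA values are illustrative; struct nvm_user_vio
 * comes from <uapi/linux/lightnvm.h>):
 *
 *	struct nvm_user_vio vio = { 0 };
 *
 *	vio.opcode = 0x92;			// e.g. a vector read
 *	vio.nppas = 0;				// 0's based: one PPA
 *	vio.ppa_list = ppa;			// single PPA passed by value
 *	vio.addr = (__u64)(uintptr_t)buf;	// one sector of data
 *	ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio);
 *
 * With nppas > 0, ppa_list must instead point to an array of nppas + 1
 * PPAs, which nvme_nvm_submit_user_cmd() copies into a DMA buffer.
 */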
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}
#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);
static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,

	NULL,
};
static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}
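/*
 * Once the group is registered the attributes show up under the disk's
 * sysfs directory (device name and values illustrative):
 *
 *	$ cat /sys/block/nvme0n1/lightnvm/version
 *	1
 *	$ cat /sys/block/nvme0n1/lightnvm/num_channels
 *	16
 */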