/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
struct nvme_nvm_hb_rw {

struct nvme_nvm_ph_rw {

struct nvme_nvm_identity {

struct nvme_nvm_l2ptbl {

struct nvme_nvm_getbbtbl {

struct nvme_nvm_setbbtbl {

struct nvme_nvm_erase_blk {
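/*
 * Every LightNVM command is built in a single 64-byte slot and cast to
 * struct nvme_command at submission time; the union below covers the
 * vendor-specific admin commands and the hybrid/physical I/O formats.
 */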
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};
struct nvme_nvm_id_group {
	struct nvme_nvm_lp_tbl lptbl;

struct nvme_nvm_addr_format {

struct nvme_nvm_id {
	struct nvme_nvm_addr_format	ppaf;
	struct nvme_nvm_id_group	groups[4];

struct nvme_nvm_bb_tbl {
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}
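/*
 * Copy the single configuration group reported by the device into the
 * host-endian nvm_id_group used by the lightnvm core; devices reporting
 * more than one group are rejected.
 */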
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;

	if (nvme_nvm_id->cgrps != 1)
		return -EINVAL;

	src = &nvme_nvm_id->groups[0];
	dst = &nvm_id->grp;

	dst->mtype = src->mtype;
	dst->fmtype = src->fmtype;

	dst->num_ch = src->num_ch;
	dst->num_lun = src->num_lun;
	dst->num_pln = src->num_pln;

	dst->num_pg = le16_to_cpu(src->num_pg);
	dst->num_blk = le16_to_cpu(src->num_blk);
	dst->fpg_sz = le16_to_cpu(src->fpg_sz);
	dst->csecs = le16_to_cpu(src->csecs);
	dst->sos = le16_to_cpu(src->sos);

	dst->trdt = le32_to_cpu(src->trdt);
	dst->trdm = le32_to_cpu(src->trdm);
	dst->tprt = le32_to_cpu(src->tprt);
	dst->tprm = le32_to_cpu(src->tprm);
	dst->tbet = le32_to_cpu(src->tbet);
	dst->tbem = le32_to_cpu(src->tbem);
	dst->mpos = le32_to_cpu(src->mpos);
	dst->mccap = le32_to_cpu(src->mccap);

	dst->cpar = le16_to_cpu(src->cpar);

	if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
		memcpy(dst->lptbl.id, src->lptbl.id, 8);
		dst->lptbl.mlc.num_pairs =
				le16_to_cpu(src->lptbl.mlc.num_pairs);

		if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
			pr_err("nvm: number of MLC pairs not supported\n");
			return -EINVAL;
		}

		memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
					dst->lptbl.mlc.num_pairs);
	}

	return 0;
}
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
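/*
 * The logical-to-physical table is fetched in chunks; each request is
 * capped by the admin queue's max_hw_sectors, and every chunk is handed
 * to the caller through update_l2p() after translation into the target's
 * address space.
 */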
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);
		u64 elba = slba + cmd_nlb;

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->device,
					"L2P table transfer failed (%d)\n", ret);
			ret = -EIO;
			goto out;
		}

		if (unlikely(elba > nvmdev->total_secs)) {
			pr_err("nvm: L2P data from device is out of bounds!\n");
			ret = -EINVAL;
			goto out;
		}

		/* Transform physical address to target address space */
		nvm_part_to_tgt(nvmdev, entries, cmd_nlb);

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
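/*
 * Bad block table retrieval: one state byte per block across all planes
 * of the addressed LUN, preceded by a "BBLT" header that is sanity
 * checked before the state bytes are copied out to the caller.
 */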
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->blks_per_lun * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
			"bbt unexpected blocks returned (%u!=%u)",
			le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}
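/*
 * Translate an nvm_rq into the on-wire command. Hybrid read/write
 * commands additionally carry the logical slba derived from the bio's
 * starting sector.
 */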
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
					rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = nvme_req(rq)->result.u64;
	rqd->error = error;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}
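/*
 * Asynchronous I/O path: allocate a request around the caller's bio,
 * fill in the command from the nvm_rq and fire it without waiting;
 * completion is reported through nvme_nvm_end_io().
 */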
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return -ENOMEM;
	}
	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	rq->ioprio = bio_prio(bio);
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
	c.erase.control = cpu_to_le16(rqd->flags);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
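/*
 * PPA lists and out-of-band metadata are handed to the device by DMA
 * address, so each LightNVM device keeps a small PAGE_SIZE dma_pool for
 * those buffers.
 */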
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
				dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}
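/* Callbacks handed to the lightnvm core at registration time. */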
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};
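/*
 * User vector I/O (ioctl) path: commands built from userspace descriptors
 * are executed synchronously; the submitter waits on an on-stack
 * completion that is signalled from this end_io handler.
 */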
static void nvme_nvm_end_user_vio(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	complete(waiting);
}
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
	rq->end_io_data = &wait;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
							&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}
			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		if (!disk)
			goto submit;

		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto err_meta;
		}
	}

submit:
	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);

	wait_for_completion_io(&wait);

	ret = nvme_error_status(rq->errors);
	if (result)
		*result = rq->errors & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}
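/*
 * NVME_NVM_IOCTL_SUBMIT_VIO takes a struct nvm_user_vio describing the
 * opcode, PPA list and data/metadata buffers. A minimal userspace sketch
 * (illustrative only; fd, nr_ppas, ppas and buf are hypothetical caller
 * variables, field names as consumed by nvme_nvm_submit_vio() below):
 *
 *	struct nvm_user_vio vio = { 0 };
 *
 *	vio.opcode = 0x92;			// NVM_OP_PREAD (physical read)
 *	vio.nppas = nr_ppas - 1;		// zero-based PPA count
 *	vio.ppa_list = (__u64)(uintptr_t)ppas;	// device-format PPAs
 *	vio.addr = (__u64)(uintptr_t)buf;	// (nppas + 1) sectors of data
 *	if (ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio) < 0)
 *		perror("vio");
 *
 * The data length is not passed explicitly; it is derived from nppas and
 * the namespace LBA size.
 */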
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}
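/*
 * Entry point for the LightNVM ioctls, dispatched from the nvme block
 * ioctl handler for LightNVM-enabled namespaces.
 */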
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}
void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}
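/*
 * Identify-geometry attributes exported under the disk's "lightnvm"
 * sysfs group; a single show routine serves all of them by switching on
 * the attribute name.
 */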
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}
#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);
static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,
	NULL,
};
static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};
int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
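/*
 * LightNVM support is only enabled for known controllers, identified by
 * PCI vendor/device ID plus a vendor-specific bit in the Identify
 * Namespace data.
 */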
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* XXX: this is poking into PCI structures from generic code! */
	struct pci_dev *pdev = to_pci_dev(ctrl->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}