/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
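
/*
 * The vendor-specific commands below are all 64 bytes and overlay the
 * generic NVMe submission queue entry: bytes 0-39 follow the common
 * layout (opcode, flags, command id, nsid, metadata, PRPs) and bytes
 * 40-63 are command specific. _nvme_nvm_check_size() enforces this.
 */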
struct nvme_nvm_hb_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			rsvd3;
	__le64			slba;
};

struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			rsvd3;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;
	__u32			rsvd11[5];
};

struct nvme_nvm_l2ptbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[4];
	__le64			prp1;
	__le64			prp2;
	__le64			slba;
	__le32			nlb;
	__le16			cdw14[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			rsvd4;
	__le64			resv;
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
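
/*
 * Identify geometry: MLC devices additionally report a lower/upper page
 * pairing table, packed at 4 bits per pair (see init_grps()).
 */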
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[886];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_blk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}
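
/*
 * Unpack the little-endian identify group descriptors into the
 * host-native nvm_id. At most four groups are defined by the interface.
 */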
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);

		if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
			memcpy(dst->lptbl.id, src->lptbl.id, 8);
			dst->lptbl.mlc.num_pairs =
					le16_to_cpu(src->lptbl.mlc.num_pairs);
			/* 4 bits per pair */
			memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
					dst->lptbl.mlc.num_pairs >> 1);
		}
	}

	return 0;
}
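
/*
 * Issue the LightNVM identity admin command and translate the returned
 * device geometry into the media manager's nvm_id.
 */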
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
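
/*
 * The L2P table may exceed a single transfer, so it is fetched in chunks
 * bounded by the admin queue's maximum transfer size. Each chunk of
 * 8-byte entries is handed to the update_l2p callback.
 */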
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->dev,
					"L2P table transfer failed (%d)\n", ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
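
/*
 * Retrieve the bad block table for the LUN addressed by ppa. The reply
 * starts with a "BBLT" signature and version, followed by one state byte
 * per block, which is passed on to the update_bbtbl callback with the
 * address converted back to the generic format.
 */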
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct request_queue *q = nvmdev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->dev, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
		ret = -EINVAL;
		dev_err(ctrl->dev, "bbt unexpected blocks returned (%u!=%u)\n",
					le32_to_cpu(bb_tbl->tblks), nr_blocks);
		goto out;
	}

	ppa = dev_to_generic_addr(nvmdev, ppa);
	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);

out:
	kfree(bb_tbl);
	return ret;
}
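
/*
 * Mark the blocks starting at rqd->ppa_addr with the given bad block
 * type. nlb is 0-based, hence nr_pages - 1.
 */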
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
	return ret;
}
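
/*
 * Translate an nvm_rq into a 64-byte NVMe command. Hybrid reads/writes
 * additionally carry the logical slba taken from the request's bio.
 */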
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
						rqd->bio->bi_iter.bi_sector));
}
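
/*
 * Completion path: notify the LightNVM core, then free the command
 * buffer and the request allocated in nvme_nvm_submit_io().
 */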
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	nvm_end_io(rqd, error);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}
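
/*
 * Wrap the target's bio in a driver-private block request and execute it
 * asynchronously; completion is reported through nvme_nvm_end_io().
 */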
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = (void *)0;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
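
/* Erase is submitted synchronously on the I/O queue; length is 0-based. */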
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
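
/*
 * PPA lists passed to the device must be DMA-addressable, so targets
 * allocate them from a page-sized DMA pool created per device.
 */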
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};
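
/* Entry points used by the NVMe core to attach LightNVM-capable namespaces. */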
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}

/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* XXX: this is poking into PCI structures from generic code! */
	struct pci_dev *pdev = to_pci_dev(ctrl->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}