/* SPDX-License-Identifier: GPL-2.0 */
#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>
#define NVM_GEN_CH_BITS (8)
#define NVM_GEN_LUN_BITS (8)
#define NVM_GEN_BLK_BITS (16)
#define NVM_GEN_RESERVED (32)

#define NVM_12_PG_BITS (16)
#define NVM_12_PL_BITS (4)
#define NVM_12_SEC_BITS (4)
#define NVM_12_RESERVED (8)

#define NVM_20_SEC_BITS (24)
#define NVM_20_RESERVED (8)
enum {
        NVM_OCSSD_SPEC_12 = 12,
        NVM_OCSSD_SPEC_20 = 20,
};
struct ppa_addr {
        /* Generic structure for all addresses */
        union {
                /* generic device format */
                struct {
                        u64 ch : NVM_GEN_CH_BITS;
                        u64 lun : NVM_GEN_LUN_BITS;
                        u64 blk : NVM_GEN_BLK_BITS;
                        u64 reserved : NVM_GEN_RESERVED;
                } a;

                /* 1.2 device format */
                struct {
                        u64 ch : NVM_GEN_CH_BITS;
                        u64 lun : NVM_GEN_LUN_BITS;
                        u64 blk : NVM_GEN_BLK_BITS;
                        u64 pg : NVM_12_PG_BITS;
                        u64 pl : NVM_12_PL_BITS;
                        u64 sec : NVM_12_SEC_BITS;
                        u64 reserved : NVM_12_RESERVED;
                } g;

                /* 2.0 device format */
                struct {
                        u64 grp : NVM_GEN_CH_BITS;
                        u64 pu : NVM_GEN_LUN_BITS;
                        u64 chk : NVM_GEN_BLK_BITS;
                        u64 sec : NVM_20_SEC_BITS;
                        u64 reserved : NVM_20_RESERVED;
                } m;

                struct {
                        u64 line : 63;
                        u64 is_cached : 1;
                } c;

                u64 ppa;
        };
};
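/*
 * Example (illustrative sketch, not normative): the union above lets the same
 * 64-bit value be viewed through the 1.2 bit fields (g), the 2.0 bit fields
 * (m), the cache-entry encoding (c), or as the raw value:
 *
 *      struct ppa_addr p = { .ppa = 0 };
 *
 *      p.m.grp = 0;    // 2.0: group (maps to a 1.2 channel)
 *      p.m.pu  = 2;    // 2.0: parallel unit (maps to a 1.2 LUN)
 *      p.m.chk = 7;    // 2.0: chunk within the parallel unit
 *      p.m.sec = 0;    // 2.0: sector within the chunk
 *
 *      // p.ppa now holds the packed generic address that the conversion
 *      // helpers further down translate to the device's own bit layout.
 */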
struct nvm_rq;
struct nvm_dev;
struct nvm_tgt_dev;
struct nvm_chk_meta;

typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
                                  struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
                                     dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);
struct nvm_dev_ops {
        nvm_id_fn *identity;
        nvm_op_bb_tbl_fn *get_bb_tbl;
        nvm_op_set_bb_fn *set_bb_tbl;

        nvm_get_chk_meta_fn *get_chk_meta;

        nvm_submit_io_fn *submit_io;
        nvm_submit_io_sync_fn *submit_io_sync;

        nvm_create_dma_pool_fn *create_dma_pool;
        nvm_destroy_dma_pool_fn *destroy_dma_pool;
        nvm_dev_dma_alloc_fn *dev_dma_alloc;
        nvm_dev_dma_free_fn *dev_dma_free;
};
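/*
 * Example (illustrative sketch; the my_drv_* callbacks are hypothetical): a
 * backend driver fills in an ops table and attaches it to its struct nvm_dev
 * before registering the device with the core:
 *
 *      static struct nvm_dev_ops my_drv_nvm_ops = {
 *              .identity         = my_drv_identity,
 *              .get_bb_tbl       = my_drv_get_bb_tbl,
 *              .set_bb_tbl       = my_drv_set_bb_tbl,
 *              .get_chk_meta     = my_drv_get_chk_meta,
 *              .submit_io        = my_drv_submit_io,
 *              .submit_io_sync   = my_drv_submit_io_sync,
 *              .create_dma_pool  = my_drv_create_dma_pool,
 *              .destroy_dma_pool = my_drv_destroy_dma_pool,
 *              .dev_dma_alloc    = my_drv_dev_dma_alloc,
 *              .dev_dma_free     = my_drv_dev_dma_free,
 *      };
 */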
#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>
enum {
        /* HW Responsibilities */
        NVM_RSP_L2P = 1 << 0,
        NVM_RSP_ECC = 1 << 1,

        /* Physical Addressing Mode */
        NVM_ADDRMODE_LINEAR = 0,
        NVM_ADDRMODE_CHANNEL = 1,

        /* Plane programming mode for LUN */
        NVM_PLANE_SINGLE = 1,
        NVM_PLANE_DOUBLE = 2,
        NVM_PLANE_QUAD = 4,

        /* Status codes */
        NVM_RSP_SUCCESS = 0x0,
        NVM_RSP_NOT_CHANGEABLE = 0x1,
        NVM_RSP_ERR_FAILWRITE = 0x40ff,
        NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
        NVM_RSP_ERR_FAILECC = 0x4281,
        NVM_RSP_ERR_FAILCRC = 0x4004,
        NVM_RSP_WARN_HIGHECC = 0x4700,

        /* Device opcodes */
        NVM_OP_PWRITE = 0x91,
        NVM_OP_PREAD = 0x92,
        NVM_OP_ERASE = 0x90,

        /* PPA Command Flags */
        NVM_IO_SNGL_ACCESS = 0x0,
        NVM_IO_DUAL_ACCESS = 0x1,
        NVM_IO_QUAD_ACCESS = 0x2,

        /* NAND Access Modes */
        NVM_IO_SUSPEND = 0x80,
        NVM_IO_SLC_MODE = 0x100,
        NVM_IO_SCRAMBLE_ENABLE = 0x200,

        /* Block Types */
        NVM_BLK_T_FREE = 0x0,
        NVM_BLK_T_BAD = 0x1,
        NVM_BLK_T_GRWN_BAD = 0x2,
        NVM_BLK_T_DEV = 0x4,
        NVM_BLK_T_HOST = 0x8,

        /* Memory capabilities */
        NVM_ID_CAP_SLC = 0x1,
        NVM_ID_CAP_CMD_SUSPEND = 0x2,
        NVM_ID_CAP_SCRAMBLE = 0x4,
        NVM_ID_CAP_ENCRYPT = 0x8,

        /* Memory types */
        NVM_ID_FMTYPE_SLC = 0,
        NVM_ID_FMTYPE_MLC = 1,

        /* Device capabilities */
        NVM_ID_DCAP_BBLKMGMT = 0x1,
        NVM_UD_DCAP_ECC = 0x2,
};
struct nvm_id_lp_mlc {
        u16 num_pairs;
        u8 pairs[886];
};

struct nvm_id_lp_tbl {
        __u8 id[8];
        struct nvm_id_lp_mlc mlc;
};
struct nvm_addrf_12 {
enum {
        /* Chunk states */
        NVM_CHK_ST_FREE = 1 << 0,
        NVM_CHK_ST_CLOSED = 1 << 1,
        NVM_CHK_ST_OPEN = 1 << 2,
        NVM_CHK_ST_OFFLINE = 1 << 3,

        /* Chunk types */
        NVM_CHK_TP_W_SEQ = 1 << 0,
        NVM_CHK_TP_W_RAN = 1 << 1,
        NVM_CHK_TP_SZ_SPEC = 1 << 4,
};
/*
 * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
 * buffer can be used when converting from little endian to cpu addressing.
 */
struct nvm_chk_meta {
        u8 state;
        u8 type;
        u8 wi;
        u8 rsvd[5];
        u64 slba;
        u64 cnlb;
        u64 wp;
};
struct nvm_target {
        struct list_head list;
        struct nvm_tgt_dev *dev;
        struct nvm_tgt_type *type;
        struct gendisk *disk;
};
#define ADDR_EMPTY (~0ULL)

#define NVM_TARGET_DEFAULT_OP (11)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
        struct nvm_tgt_dev *dev;
        struct bio *bio;

        union {
                struct ppa_addr ppa_addr;
                dma_addr_t dma_ppa_list;
        };
        struct ppa_addr *ppa_list;

        void *meta_list;
        dma_addr_t dma_meta_list;

        nvm_end_io_fn *end_io;

        uint8_t opcode;
        uint16_t nr_ppas;
        uint16_t flags;

        u64 ppa_status; /* ppa media status */
        int error;

        int is_seq; /* Sequential hint flag. 1.2 only */

        void *private;
};
static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
        return rqdata + 1;
}

static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
        return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}
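/*
 * Example (illustrative sketch): a target typically allocates its per-command
 * context directly behind the struct nvm_rq, so the two PDU helpers above
 * simply move the pointer by sizeof(struct nvm_rq). Assuming a hypothetical
 * struct my_tgt_cmd as the per-command context:
 *
 *      struct nvm_rq *rqd = kzalloc(sizeof(struct nvm_rq) +
 *                                   sizeof(struct my_tgt_cmd), GFP_KERNEL);
 *      struct my_tgt_cmd *cmd = nvm_rq_to_pdu(rqd);
 *
 *      // ... and back again, e.g. in the completion path:
 *      struct nvm_rq *orig = nvm_rq_from_pdu(cmd);     // orig == rqd
 */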
enum {
        NVM_BLK_ST_FREE = 0x1,  /* Free block */
        NVM_BLK_ST_TGT = 0x2,   /* Block in use by target */
        NVM_BLK_ST_BAD = 0x8,   /* Bad block */
};
/* Instance geometry */
struct nvm_geo {
        /* device reported version */
        u8 major_ver_id;
        u8 minor_ver_id;

        /* kernel short version */
        u8 version;

        /* instance specific geometry */
        int num_ch;
        int num_lun;            /* per channel */

        /* calculated values */
        int all_luns;           /* across channels */
        int all_chunks;         /* across channels */

        int op;                 /* over-provision in instance */

        sector_t total_secs;    /* across channels */

        /* chunk geometry */
        u32 num_chk;            /* chunks per lun */
        u32 clba;               /* sectors per chunk */
        u16 csecs;              /* sector size */
        u16 sos;                /* out-of-band area size */

        /* device write constraints */
        u32 ws_min;             /* minimum write size */
        u32 ws_opt;             /* optimal write size */
        u32 mw_cunits;          /* distance required for successful read */
        u32 maxoc;              /* maximum open chunks */
        u32 maxocpu;            /* maximum open chunks per parallel unit */

        /* device capabilities */
        u32 mccap;

        /* device timings */
        u32 trdt;               /* Avg. Tread (ns) */
        u32 trdm;               /* Max Tread (ns) */
        u32 tprt;               /* Avg. Tprog (ns) */
        u32 tprm;               /* Max Tprog (ns) */
        u32 tbet;               /* Avg. Terase (ns) */
        u32 tbem;               /* Max Terase (ns) */

        /* generic address format */
        struct nvm_addrf addrf;

        /* 1.2 compatibility */
        u8 vmnt;
        u32 cap;
        u32 dom;

        u8 mtype;
        u8 fmtype;

        u16 cpar;
        u32 mpos;

        u8 num_pln;
        u8 pln_mode;
        u16 num_pg;
        u16 fpg_sz;
};
/* sub-device structure */
struct nvm_tgt_dev {
        /* Device information */
        struct nvm_geo geo;

        /* Base ppas for target LUNs */
        struct ppa_addr *luns;

        struct request_queue *q;

        struct nvm_dev *parent;
        void *map;
};
struct nvm_dev {
        struct nvm_dev_ops *ops;

        struct list_head devices;

        /* Device information */
        struct nvm_geo geo;

        unsigned long *lun_map;
        void *dma_pool;

        /* Backend device */
        struct request_queue *q;
        char name[DISK_NAME_LEN];
        void *private_data;

        struct kref ref;
        void *rmap;

        struct mutex mlock;
        spinlock_t lock;

        /* target management */
        struct list_head area_list;
        struct list_head targets;
};
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
                                                  struct ppa_addr r)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr l;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

                l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
                l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
                l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
                l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
                l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
                l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
        } else {
                struct nvm_addrf *lbaf = &geo->addrf;

                l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
                l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
                l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
                l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
        }

        return l;
}
static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
                                                  struct ppa_addr r)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr l;

        l.ppa = 0;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

                l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
                l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
                l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
                l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
                l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
                l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
        } else {
                struct nvm_addrf *lbaf = &geo->addrf;

                l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
                l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
                l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
                l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
        }

        return l;
}
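/*
 * Example (illustrative sketch, assuming a 2.0 geometry and a hypothetical
 * struct nvm_dev *dev): the two helpers above translate between the generic
 * bit layout used by targets and the device-specific layout described by
 * geo->addrf, and they are inverses of each other:
 *
 *      struct ppa_addr gen = { .ppa = 0 };
 *      struct ppa_addr dev_fmt, back;
 *
 *      gen.m.grp = 0;
 *      gen.m.pu = 1;
 *      gen.m.chk = 10;
 *      gen.m.sec = 0;
 *
 *      dev_fmt = generic_to_dev_addr(dev, gen);        // pack for the device
 *      back = dev_to_generic_addr(dev, dev_fmt);       // back.ppa == gen.ppa
 */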
static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
                                    struct ppa_addr p)
{
        struct nvm_geo *geo = &dev->geo;
        u64 caddr;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;

                caddr = (u64)p.g.pg << ppaf->pg_offset;
                caddr |= (u64)p.g.pl << ppaf->pln_offset;
                caddr |= (u64)p.g.sec << ppaf->sec_offset;
        } else {
                caddr = p.m.sec;
        }

        return caddr;
}
static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
                                                 void *addrf, u32 ppa32)
{
        struct ppa_addr ppa64;

        ppa64.ppa = 0;

        if (ppa32 == -1) {
                ppa64.ppa = ADDR_EMPTY;
        } else if (ppa32 & (1U << 31)) {
                ppa64.c.line = ppa32 & ((~0U) >> 1);
                ppa64.c.is_cached = 1;
        } else {
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf = addrf;

                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset;
                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset;
                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset;
                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset;
                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset;
                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = addrf;

                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> lbaf->ch_offset;
                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> lbaf->lun_offset;
                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> lbaf->chk_offset;
                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> lbaf->sec_offset;
                }
        }

        return ppa64;
}
static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
                                     void *addrf, struct ppa_addr ppa64)
{
        u32 ppa32 = 0;

        if (ppa64.ppa == ADDR_EMPTY) {
                ppa32 = ~0U;
        } else if (ppa64.c.is_cached) {
                ppa32 |= ppa64.c.line;
                ppa32 |= 1U << 31;
        } else {
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf = addrf;

                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = addrf;

                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
                }
        }

        return ppa32;
}
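/*
 * Example (illustrative sketch): the 32-bit form is used by targets that keep
 * a compact L2P table. Bit 31 marks an address that points into the target's
 * write cache rather than at media, and ~0U encodes ADDR_EMPTY. A round trip
 * for a media address, given a device *dev and its address format *addrf:
 *
 *      u32 compact = nvm_ppa64_to_ppa32(dev, addrf, ppa);
 *      struct ppa_addr again = nvm_ppa32_to_ppa64(dev, addrf, compact);
 *      // again.ppa == ppa.ppa, provided the address fits in 32 bits
 */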
static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
                                      struct ppa_addr *ppa)
{
        struct nvm_geo *geo = &dev->geo;
        int last = 0;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                int sec = ppa->g.sec;

                sec++;
                if (sec == geo->ws_min) {
                        int pg = ppa->g.pg;

                        sec = 0;
                        pg++;
                        if (pg == geo->num_pg) {
                                int pl = ppa->g.pl;

                                pg = 0;
                                pl++;
                                if (pl == geo->num_pln)
                                        last = 1;

                                ppa->g.pl = pl;
                        }
                        ppa->g.pg = pg;
                }
                ppa->g.sec = sec;
        } else {
                ppa->m.sec++;
                if (ppa->m.sec == geo->clba)
                        last = 1;
        }

        return last;
}
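/*
 * Example (illustrative sketch; first_ppa_of_chunk and tgt_dev are
 * hypothetical): walking every sector of a chunk by starting at its first
 * sector and advancing until the helper reports the last one:
 *
 *      struct ppa_addr ppa = first_ppa_of_chunk;
 *      int done = 0;
 *
 *      while (!done) {
 *              // ... issue or account for the sector at 'ppa' ...
 *              done = nvm_next_ppa_in_chk(tgt_dev, &ppa);
 *      }
 */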
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
                                int flags);
typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
enum {
        NVM_TGT_F_DEV_L2P = 0,
        NVM_TGT_F_HOST_L2P = 1 << 0,
};
struct nvm_tgt_type {
        const char *name;
        unsigned int version[3];
        int flags;

        /* target entry points */
        nvm_tgt_make_rq_fn *make_rq;
        nvm_tgt_capacity_fn *capacity;

        /* module-specific init/teardown */
        nvm_tgt_init_fn *init;
        nvm_tgt_exit_fn *exit;

        /* sysfs */
        nvm_tgt_sysfs_init_fn *sysfs_init;
        nvm_tgt_sysfs_exit_fn *sysfs_exit;

        /* For internal use */
        struct list_head list;
        struct module *owner;
};
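/*
 * Example (illustrative sketch; the tt_example_* names are hypothetical): a
 * target module describes itself with a struct nvm_tgt_type and registers it
 * so that instances can be created on top of an open-channel device:
 *
 *      static struct nvm_tgt_type tt_example = {
 *              .name           = "example",
 *              .version        = {1, 0, 0},
 *              .flags          = NVM_TGT_F_HOST_L2P,
 *
 *              .make_rq        = tt_example_make_rq,
 *              .capacity       = tt_example_capacity,
 *              .init           = tt_example_init,
 *              .exit           = tt_example_exit,
 *
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      // in the module init/exit paths:
 *      //      nvm_register_tgt_type(&tt_example);
 *      //      nvm_unregister_tgt_type(&tt_example);
 */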
extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
                              int, struct nvm_chk_meta *);
extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
                              int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
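/*
 * Example (illustrative sketch; tt_example_end_io, nr_secs, ppa_list, tt_ctx
 * and tgt_dev are hypothetical): asynchronous vector I/O from a target. The
 * target fills in a struct nvm_rq, points end_io at its completion handler
 * and submits; when the backend completes the command it calls nvm_end_io(),
 * which invokes the end_io callback:
 *
 *      rqd->opcode = NVM_OP_PWRITE;
 *      rqd->nr_ppas = nr_secs;         // up to NVM_MAX_VLBA
 *      rqd->ppa_list = ppa_list;       // vector of PPAs for the command
 *      rqd->end_io = tt_example_end_io;
 *      rqd->private = tt_ctx;
 *
 *      ret = nvm_submit_io(tgt_dev, rqd);
 */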
#else /* CONFIG_NVM */

struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
        return ERR_PTR(-EINVAL);
}

static inline int nvm_register(struct nvm_dev *dev)
{
        return -EINVAL;
}

static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */

#endif /* LIGHTNVM.H */