/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2017 - Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H

#include <linux/mtd/mtd.h>

struct nand_device;

/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size
 * @oobsize: OOB area size
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int max_bad_eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};

#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.max_bad_eraseblocks_per_lun = (mbb),		\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
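
/*
 * Example (illustrative only, not taken from an existing driver): a
 * hypothetical single-die SLC chip with 2048-byte pages, 64 OOB bytes,
 * 64 pages per eraseblock, 1024 eraseblocks per LUN, at most 20 bad
 * eraseblocks per LUN, a single plane and a single LUN could be described as:
 *
 *	static const struct nand_memory_organization example_memorg =
 *		NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 *
 * which amounts to 64 * 2048 * 1024 = 128MiB of main area.
 */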

/**
 * struct nand_row_converter - Information needed to convert an absolute offset
 *			       into a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};

/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};

/**
 * enum nand_page_io_req_type - Direction of an I/O request
 * @NAND_PAGE_READ: from the chip, to the controller
 * @NAND_PAGE_WRITE: from the controller, to the chip
 */
enum nand_page_io_req_type {
	NAND_PAGE_READ = 0,
	NAND_PAGE_WRITE,
};

/**
 * struct nand_page_io_req - NAND I/O request object
 * @type: the type of page I/O: read or write
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX modes
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all the useful information is already laid out in a convenient form and
 * specific NAND layers can focus on translating it into specific
 * commands/operations.
 */
struct nand_page_io_req {
	enum nand_page_io_req_type type;
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);

/**
 * enum nand_ecc_engine_type - NAND ECC engine type
 * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
 * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
 * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
 */
enum nand_ecc_engine_type {
	NAND_ECC_ENGINE_TYPE_INVALID,
	NAND_ECC_ENGINE_TYPE_NONE,
	NAND_ECC_ENGINE_TYPE_SOFT,
	NAND_ECC_ENGINE_TYPE_ON_HOST,
	NAND_ECC_ENGINE_TYPE_ON_DIE,
};

/**
 * enum nand_ecc_placement - NAND ECC bytes placement
 * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
 * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
 * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
 *                                  interleaved with regular data in the main
 *                                  area
 */
enum nand_ecc_placement {
	NAND_ECC_PLACEMENT_UNKNOWN,
	NAND_ECC_PLACEMENT_OOB,
	NAND_ECC_PLACEMENT_INTERLEAVED,
};

/**
 * enum nand_ecc_algo - NAND ECC algorithm
 * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
 * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
 * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
 * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
 */
enum nand_ecc_algo {
	NAND_ECC_ALGO_UNKNOWN,
	NAND_ECC_ALGO_HAMMING,
	NAND_ECC_ALGO_BCH,
	NAND_ECC_ALGO_RS,
};

/**
 * struct nand_ecc_props - NAND ECC properties
 * @engine_type: ECC engine type
 * @placement: OOB placement (if relevant)
 * @algo: ECC algorithm (if relevant)
 * @strength: ECC strength (number of correctable bits per step)
 * @step_size: Number of data bytes covered by a single ECC step
 * @flags: Misc properties
 */
struct nand_ecc_props {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	unsigned int strength;
	unsigned int step_size;
	unsigned int flags;
};

#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
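
/*
 * Example (illustrative only): a chip requiring 8 bits of correction per
 * 512-byte step could advertise its requirements with:
 *
 *	static const struct nand_ecc_props example_reqs = NAND_ECCREQ(8, 512);
 *
 * Only @strength and @step_size are set; the remaining nand_ecc_props fields
 * are left zero-initialized.
 */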

/* NAND ECC misc flags */
#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)

/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache
 */
struct nand_bbt {
	unsigned long *cache;
};

/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future calls to struct_nand_ops->isbad() return
 *	     true
 * @isbad: check whether a block is bad or not. This method should just read
 *	   the BBM and return whether the block is bad or not based on what it
 *	   reads
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
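
/*
 * Minimal sketch of a specialized layer providing these hooks. The foo_*()
 * names are hypothetical; only the hook prototypes and the structure layout
 * come from the API above:
 *
 *	static int foo_erase(struct nand_device *nand, const struct nand_pos *pos)
 *	{
 *		return foo_send_erase_cmd(nand, nanddev_pos_to_row(nand, pos));
 *	}
 *
 *	static const struct nand_ops foo_ops = {
 *		.erase = foo_erase,
 *		.markbad = foo_markbad,
 *		.isbad = foo_isbad,
 *	};
 */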

/**
 * struct nand_ecc_context - Context for the ECC engine
 * @conf: basic ECC engine parameters
 * @nsteps: number of ECC steps
 * @total: total number of bytes used for storing ECC codes, this is used by
 *         generic OOB layouts
 * @priv: ECC engine driver private data
 */
struct nand_ecc_context {
	struct nand_ecc_props conf;
	unsigned int nsteps;
	unsigned int total;
	void *priv;
};

/**
 * struct nand_ecc_engine_ops - ECC engine operations
 * @init_ctx: given a desired user configuration for the pointed NAND device,
 *            requests the ECC engine driver to setup a configuration with
 *            values it supports.
 * @cleanup_ctx: clean the context initialized by @init_ctx.
 * @prepare_io_req: is called before reading/writing a page to prepare the I/O
 *                  request to be performed with ECC correction.
 * @finish_io_req: is called after reading/writing a page to terminate the I/O
 *                 request and ensure proper ECC correction.
 */
struct nand_ecc_engine_ops {
	int (*init_ctx)(struct nand_device *nand);
	void (*cleanup_ctx)(struct nand_device *nand);
	int (*prepare_io_req)(struct nand_device *nand,
			      struct nand_page_io_req *req);
	int (*finish_io_req)(struct nand_device *nand,
			     struct nand_page_io_req *req);
};

/**
 * struct nand_ecc_engine - ECC engine abstraction for NAND devices
 * @ops: ECC engine operations
 */
struct nand_ecc_engine {
	struct nand_ecc_engine_ops *ops;
};
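
/*
 * An ECC engine driver typically wraps its operations in a statically
 * allocated engine object. Hedged sketch, the foo_ecc_*() callbacks are
 * hypothetical:
 *
 *	static struct nand_ecc_engine_ops foo_ecc_engine_ops = {
 *		.init_ctx = foo_ecc_init_ctx,
 *		.cleanup_ctx = foo_ecc_cleanup_ctx,
 *		.prepare_io_req = foo_ecc_prepare_io_req,
 *		.finish_io_req = foo_ecc_finish_io_req,
 *	};
 *
 *	static struct nand_ecc_engine foo_ecc_engine = {
 *		.ops = &foo_ecc_engine_ops,
 *	};
 */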

void of_get_nand_ecc_user_config(struct nand_device *nand);
int nand_ecc_init_ctx(struct nand_device *nand);
void nand_ecc_cleanup_ctx(struct nand_device *nand);
int nand_ecc_prepare_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req);
int nand_ecc_finish_io_req(struct nand_device *nand,
			   struct nand_page_io_req *req);
bool nand_ecc_is_strong_enough(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);

#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
#else
static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
{
	return NULL;
}
#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */

#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
#else
static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
{
	return NULL;
}
#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */

/**
 * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
 * @orig_req: Copy of the original IO request
 * @nand: Related NAND device, to have access to its memory organization
 * @page_buffer_size: Real size of the page buffer to use (can be set by the
 *                    user before the tweaking mechanism initialization)
 * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
 *                   user before the tweaking mechanism initialization)
 * @spare_databuf: Data bounce buffer
 * @spare_oobbuf: OOB bounce buffer
 * @bounce_data: Flag indicating a data bounce buffer is used
 * @bounce_oob: Flag indicating an OOB bounce buffer is used
 */
struct nand_ecc_req_tweak_ctx {
	struct nand_page_io_req orig_req;
	struct nand_device *nand;
	unsigned int page_buffer_size;
	unsigned int oob_buffer_size;
	void *spare_databuf;
	void *spare_oobbuf;
	bool bounce_data;
	bool bounce_oob;
};

int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
			       struct nand_device *nand);
void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
			struct nand_page_io_req *req);
void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
			  struct nand_page_io_req *req);

/**
 * struct nand_ecc - Information relative to the ECC
 * @defaults: Default values, depend on the underlying subsystem
 * @requirements: ECC requirements from the NAND chip perspective
 * @user_conf: User desires in terms of ECC parameters
 * @ctx: ECC context for the ECC engine, derived from the device @requirements,
 *       the @user_conf and the @defaults
 * @ondie_engine: On-die ECC engine reference, if any
 * @engine: ECC engine actually bound
 */
struct nand_ecc {
	struct nand_ecc_props defaults;
	struct nand_ecc_props requirements;
	struct nand_ecc_props user_conf;
	struct nand_ecc_context ctx;
	struct nand_ecc_engine *ondie_engine;
	struct nand_ecc_engine *engine;
};

/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @ecc: NAND ECC object attached to the NAND device
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
 * be filled at device detection time to reflect the NAND device
 * capabilities/requirements. Once this is done nanddev_init() can be called.
 * It will take care of converting NAND information into MTD ones, which means
 * the specialized NAND layers should never manually tweak
 * struct_nand_device->mtd except for the ->_read/write() hooks.
 */
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc ecc;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};

/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boilerplate code
 * needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};

/**
 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_device, mtd);
}

/**
 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
 * @nand: NAND device
 *
 * Return: the MTD device embedded in @nand.
 */
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return &nand->mtd;
}
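
/*
 * Specialized layers embed a nand_device and convert back and forth the same
 * way. Hedged sketch (struct foo_nand is hypothetical, the pattern follows
 * the inheritance scheme described above):
 *
 *	struct foo_nand {
 *		struct nand_device base;
 *		void *priv;
 *	};
 *
 *	static inline struct foo_nand *mtd_to_foo(struct mtd_info *mtd)
 *	{
 *		return container_of(mtd_to_nanddev(mtd), struct foo_nand, base);
 *	}
 */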

/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}

/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size.
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}

/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size.
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}

/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_pages_per_target() - Get the number of pages per target
 * @nand: NAND device
 *
 * Return: the number of pages per target.
 */
static inline unsigned int
nanddev_pages_per_target(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.luns_per_target;
}

/**
 * nanddev_eraseblock_size() - Get NAND erase block size
 * @nand: NAND device
 *
 * Return: the eraseblock size.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per target.
 */
static inline unsigned int
nanddev_eraseblocks_per_target(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
}

/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}

/**
 * nanddev_ntargets() - Get the total number of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}

/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand.
 */
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}
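
/*
 * For instance, with the hypothetical memorg used in the NAND_MEMORG example
 * above (2048-byte pages, 64 pages per eraseblock, 1024 eraseblocks per LUN,
 * 1 LUN, 1 target): nanddev_target_size() = 1 * 1024 * 64 * 2048 = 128MiB,
 * and with a single target nanddev_size() returns the same 128MiB.
 */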

/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * This can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}

/**
 * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
 * @nand: NAND device
 *
 * Return: the ECC configuration currently in use.
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_conf(struct nand_device *nand)
{
	return &nand->ecc.ctx.conf;
}

/**
 * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
 * @nand: NAND device
 *
 * Return: the number of ECC steps per page.
 */
static inline unsigned int
nanddev_get_ecc_nsteps(struct nand_device *nand)
{
	return nand->ecc.ctx.nsteps;
}

/**
 * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
 * @nand: NAND device
 *
 * Return: the number of ECC bytes stored for each ECC step.
 */
static inline unsigned int
nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
{
	return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
}

/**
 * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
 *                                  device
 * @nand: NAND device
 *
 * Return: the ECC requirements advertised by the NAND chip.
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_requirements(struct nand_device *nand)
{
	return &nand->ecc.requirements;
}

/**
 * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
 *                                  device
 * @nand: NAND device
 * @reqs: Requirements
 */
static inline void
nanddev_set_ecc_requirements(struct nand_device *nand,
			     const struct nand_ecc_props *reqs)
{
	nand->ecc.requirements = *reqs;
}

int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner);
void nanddev_cleanup(struct nand_device *nand);

/**
 * nanddev_register() - Register a NAND device
 * @nand: NAND device
 *
 * Register a NAND device.
 * This function is just a wrapper around mtd_device_register()
 * registering the MTD device embedded in @nand.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_register(struct nand_device *nand)
{
	return mtd_device_register(&nand->mtd, NULL, 0);
}
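
/*
 * Typical bring-up sequence in a specialized layer, as a hedged sketch (the
 * foo_*() helpers and the error handling details are hypothetical):
 *
 *	static int foo_probe(struct foo_nand *foo)
 *	{
 *		struct nand_device *nand = &foo->base;
 *		int ret;
 *
 *		foo_detect(foo);	// fills nand->memorg and ECC requirements
 *		ret = nanddev_init(nand, &foo_ops, THIS_MODULE);
 *		if (ret)
 *			return ret;
 *
 *		ret = nanddev_register(nand);
 *		if (ret)
 *			nanddev_cleanup(nand);
 *
 *		return ret;
 *	}
 */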

/**
 * nanddev_unregister() - Unregister a NAND device
 * @nand: NAND device
 *
 * Unregister a NAND device.
 * This function is just a wrapper around mtd_device_unregister()
 * unregistering the MTD device embedded in @nand.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_unregister(struct nand_device *nand)
{
	return mtd_device_unregister(&nand->mtd);
}

/**
 * nanddev_set_of_node() - Attach a DT node to a NAND device
 * @nand: NAND device
 * @np: DT node
 *
 * Attach a DT node to a NAND device.
 */
static inline void nanddev_set_of_node(struct nand_device *nand,
				       struct device_node *np)
{
	mtd_set_of_node(&nand->mtd, np);
}

/**
 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
 * @nand: NAND device
 *
 * Return: the DT node attached to @nand.
 */
static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
{
	return mtd_get_of_node(&nand->mtd);
}

/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation.
 *
 * Return: the offset within the NAND page pointed by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
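
/*
 * Worked example with a hypothetical geometry (2048-byte pages, 64 pages per
 * eraseblock, 1024 eraseblocks per LUN, single plane, single LUN, single
 * target): offs = 0x42800 (= 133 * 2048) yields a return value (pageoffs) of
 * 0, pos->page = 133 % 64 = 5, pos->eraseblock = 2, pos->plane = 0,
 * pos->lun = 0 and pos->target = 0.
 */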

/**
 * nanddev_pos_cmp() - Compare two NAND positions
 * @a: First NAND position
 * @b: Second NAND position
 *
 * Compares two NAND positions.
 *
 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 */
static inline int nanddev_pos_cmp(const struct nand_pos *a,
				  const struct nand_pos *b)
{
	if (a->target != b->target)
		return a->target < b->target ? -1 : 1;

	if (a->lun != b->lun)
		return a->lun < b->lun ? -1 : 1;

	if (a->eraseblock != b->eraseblock)
		return a->eraseblock < b->eraseblock ? -1 : 1;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;

	return 0;
}

/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page, if one wants to point to a specific offset within this page
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	return (loff_t)npages * nand->memorg.pagesize;
}

/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Converts a NAND position into a row address that can then be passed to the
 * device.
 *
 * Return: the row address extracted from @pos.
 */
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}

/**
 * nanddev_pos_next_target() - Move a position to the next target/die
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next target/die. Useful when you
 * want to iterate over all targets/dies of a NAND device.
 */
static inline void nanddev_pos_next_target(struct nand_device *nand,
					   struct nand_pos *pos)
{
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
	pos->lun = 0;
	pos->target++;
}

/**
 * nanddev_pos_next_lun() - Move a position to the next LUN
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next LUN. Useful when you want to
 * iterate over all LUNs of a NAND device.
 */
static inline void nanddev_pos_next_lun(struct nand_device *nand,
					struct nand_pos *pos)
{
	if (pos->lun >= nand->memorg.luns_per_target - 1)
		return nanddev_pos_next_target(nand, pos);

	pos->lun++;
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
}

/**
 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next eraseblock. Useful when you
 * want to iterate over all eraseblocks of a NAND device.
 */
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
					       struct nand_pos *pos)
{
	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
		return nanddev_pos_next_lun(nand, pos);

	pos->page = 0;
	pos->eraseblock++;
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}

/**
 * nanddev_pos_next_page() - Move a position to the next page
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next page. Useful when you want to
 * iterate over all pages of a NAND device.
 */
static inline void nanddev_pos_next_page(struct nand_device *nand,
					 struct nand_pos *pos)
{
	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
		return nanddev_pos_next_eraseblock(nand, pos);

	pos->page++;
}

/**
 * nanddev_io_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @reqtype: I/O request type (read or write)
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer.
 */
static inline void nanddev_io_iter_init(struct nand_device *nand,
					enum nand_page_io_req_type reqtype,
					loff_t offs, struct mtd_oob_ops *req,
					struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.type = reqtype;
	iter->req.mode = req->mode;
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	iter->req.databuf.in = req->datbuf;
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_next_page - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next page.
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_end - Should end iteration or not
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Check whether @iter has reached the end of the NAND portion it was asked to
 * iterate on or not.
 *
 * Return: true if @iter has reached the end of the iteration request, false
 *	   otherwise.
 */
static inline bool nanddev_io_iter_end(struct nand_device *nand,
				       const struct nand_io_iter *iter)
{
	if (iter->dataleft || iter->oobleft)
		return false;

	return true;
}

/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @type: I/O request type (read or write)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used to iterate over all pages that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_page(nand, type, start, req, iter)		\
	for (nanddev_io_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_page(nand, iter))
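
/*
 * Typical usage in a specialized layer's read path, as a hedged sketch
 * (foo_read_page() is hypothetical; the iteration pattern is what the macro
 * is meant for):
 *
 *	struct nand_io_iter iter;
 *	int ret;
 *
 *	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 *		ret = foo_read_page(nand, &iter.req);
 *		if (ret)
 *			break;
 *
 *		ops->retlen += iter.req.datalen;
 *		ops->oobretlen += iter.req.ooblen;
 *	}
 */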

bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);

/* ECC related functions */
int nanddev_ecc_engine_init(struct nand_device *nand);
void nanddev_ecc_engine_cleanup(struct nand_device *nand);

/* BBT related functions */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};

int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);

/**
 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 * @nand: NAND device
 * @pos: the NAND position we want to get BBT entry for
 *
 * Return the BBT entry used to store information about the eraseblock pointed
 * by @pos.
 *
 * Return: the BBT entry storing information about eraseblock pointed by @pos.
 */
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
						    const struct nand_pos *pos)
{
	return pos->eraseblock +
	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
		nand->memorg.eraseblocks_per_lun);
}
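
/*
 * Example (hedged sketch): checking whether the eraseblock at @pos is
 * recorded as bad in the in-memory BBT:
 *
 *	unsigned int entry = nanddev_bbt_pos_to_entry(nand, pos);
 *	int status = nanddev_bbt_get_block_status(nand, entry);
 *
 *	if (status == NAND_BBT_BLOCK_WORN ||
 *	    status == NAND_BBT_BLOCK_FACTORY_BAD)
 *		// the block is bad
 */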

/**
 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
 * @nand: NAND device
 *
 * Return: true if the BBT has been initialized, false otherwise.
 */
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
	return !!nand->bbt.cache;
}

/* MTD -> NAND helper functions. */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);

#endif /* __LINUX_MTD_NAND_H */