3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
13 * David Woodhouse for adding multichip support
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
19 * Enable cached programming for 2k page size chips
20 * Check, if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * BBT table is not serialized, has to be fixed
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
/* Prefix every pr_*() log message from this file with the module name. */
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
39 #include <linux/types.h>
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/nand_ecc.h>
42 #include <linux/mtd/nand_bch.h>
43 #include <linux/interrupt.h>
44 #include <linux/bitops.h>
46 #include <linux/mtd/partitions.h>
48 #include <linux/gpio/consumer.h>
50 #include "internals.h"
52 /* Define default oob placement schemes for large and small page devices */
/*
 * nand_ooblayout_ecc_sp - ECC byte placement for small-page devices.
 * The first ECC group sits at OOB offset 0 (4 bytes on 16-byte OOB,
 * 3 bytes otherwise); a second group of the remaining ecc->total - 4
 * bytes starts at offset 6.
 * NOTE(review): section-range checks and return statements are elided
 * in this excerpt — confirm against the full source.
 */
53 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
54 struct mtd_oob_region *oobregion)
56 struct nand_chip *chip = mtd_to_nand(mtd);
57 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* First ECC group starts at the very beginning of the OOB area. */
63 oobregion->offset = 0;
64 if (mtd->oobsize == 16)
65 oobregion->length = 4;
67 oobregion->length = 3;
/* 8-byte OOB devices only have room for the first group. */
69 if (mtd->oobsize == 8)
72 oobregion->offset = 6;
73 oobregion->length = ecc->total - 4;
/*
 * nand_ooblayout_free_sp - free (client-usable) OOB bytes for
 * small-page devices: bytes 8..15 on a 16-byte OOB; on smaller OOB the
 * free space is split around the BBM/ECC bytes (offsets 3 and 6).
 * NOTE(review): section checks and return paths are elided here.
 */
79 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
80 struct mtd_oob_region *oobregion)
85 if (mtd->oobsize == 16) {
89 oobregion->length = 8;
90 oobregion->offset = 8;
92 oobregion->length = 2;
94 oobregion->offset = 3;
96 oobregion->offset = 6;
/* Default OOB layout operations for small-page NAND devices. */
102 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
103 .ecc = nand_ooblayout_ecc_sp,
104 .free = nand_ooblayout_free_sp,
106 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
/*
 * nand_ooblayout_ecc_lp - ECC byte placement for large-page devices:
 * a single section holding all ecc->total bytes at the very end of the
 * OOB area.
 */
108 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
109 struct mtd_oob_region *oobregion)
111 struct nand_chip *chip = mtd_to_nand(mtd);
112 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* Only section 0 exists, and only when ECC bytes are reserved. */
114 if (section || !ecc->total)
117 oobregion->length = ecc->total;
118 oobregion->offset = mtd->oobsize - oobregion->length;
/*
 * nand_ooblayout_free_lp - free OOB bytes for large-page devices:
 * everything between the 2-byte bad block marker at the start of the
 * OOB area and the ECC bytes at its end.
 */
123 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
124 struct mtd_oob_region *oobregion)
126 struct nand_chip *chip = mtd_to_nand(mtd);
127 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* Skip the first two bytes, which hold the bad block marker. */
132 oobregion->length = mtd->oobsize - ecc->total - 2;
133 oobregion->offset = 2;
/* Default OOB layout operations for large-page NAND devices. */
138 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
139 .ecc = nand_ooblayout_ecc_lp,
140 .free = nand_ooblayout_free_lp,
142 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
145 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
146 * are placed at a fixed offset.
148 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
149 struct mtd_oob_region *oobregion)
151 struct nand_chip *chip = mtd_to_nand(mtd);
152 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* Fixed ECC start offset depends on OOB size: 40 and 80 are the two
 * cases visible here (presumably for 64- and 128-byte OOB — the switch
 * labels are elided in this excerpt). */
157 switch (mtd->oobsize) {
159 oobregion->offset = 40;
162 oobregion->offset = 80;
168 oobregion->length = ecc->total;
/* Reject layouts where the ECC bytes would overflow the OOB area. */
169 if (oobregion->offset + oobregion->length > mtd->oobsize)
/*
 * nand_ooblayout_free_lp_hamming - free OOB bytes for the legacy
 * Hamming large-page layout: one region between the 2-byte BBM and the
 * fixed ECC offset, and one region after the ECC bytes.
 */
175 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
176 struct mtd_oob_region *oobregion)
178 struct nand_chip *chip = mtd_to_nand(mtd);
179 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* Exactly two free sections exist in this layout. */
182 if (section < 0 || section > 1)
185 switch (mtd->oobsize) {
/* Section 0: between the bad block marker and the ECC area. */
197 oobregion->offset = 2;
198 oobregion->length = ecc_offset - 2;
/* Section 1: everything after the ECC bytes. */
200 oobregion->offset = ecc_offset + ecc->total;
201 oobregion->length = mtd->oobsize - oobregion->offset;
/* Legacy Hamming large-page OOB layout operations (file-local). */
207 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
208 .ecc = nand_ooblayout_ecc_lp_hamming,
209 .free = nand_ooblayout_free_lp_hamming,
/*
 * check_offs_len - validate that both the start offset and the length
 * of an operation are aligned to the erase-block size.
 * NOTE(review): the error return statements inside the two branches are
 * elided in this excerpt.
 */
212 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
216 /* Start address must align on block boundary */
217 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 pr_debug("%s: unaligned address\n", __func__);
222 /* Length must align on block boundary */
223 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: length not block aligned\n", __func__);
232 * nand_select_target() - Select a NAND target (A.K.A. die)
233 * @chip: NAND chip object
234 * @cs: the CS line to select. Note that this CS id is always from the chip
235 * PoV, not the controller one
237 * Select a NAND target so that further operations executed on @chip go to the
238 * selected NAND target.
240 void nand_select_target(struct nand_chip *chip, unsigned int cs)
243 * cs should always lie between 0 and chip->numchips, when that's not
244 * the case it's a bug and the caller should be fixed.
246 if (WARN_ON(cs > chip->numchips))
/* NOTE(review): the early return and the chip->cur_cs bookkeeping are
 * elided in this excerpt. */
/* Forward to the per-driver legacy hook when one is registered. */
251 if (chip->legacy.select_chip)
252 chip->legacy.select_chip(chip, cs);
254 EXPORT_SYMBOL_GPL(nand_select_target);
257 * nand_deselect_target() - Deselect the currently selected target
258 * @chip: NAND chip object
260 * Deselect the currently selected NAND target. The result of operations
261 * executed on @chip after the target has been deselected is undefined.
263 void nand_deselect_target(struct nand_chip *chip)
/* The legacy hook uses -1 as the "no chip selected" sentinel. */
265 if (chip->legacy.select_chip)
266 chip->legacy.select_chip(chip, -1);
270 EXPORT_SYMBOL_GPL(nand_deselect_target);
273 * nand_release_device - [GENERIC] release chip
274 * @chip: NAND chip object
276 * Release chip lock and wake up anyone waiting on the device.
278 static void nand_release_device(struct nand_chip *chip)
280 /* Release the controller and the chip */
/* Unlock in the reverse of the order taken in nand_get_device(). */
281 mutex_unlock(&chip->controller->lock);
282 mutex_unlock(&chip->lock);
286 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
287 * @chip: NAND chip object
288 * @ofs: offset from device start
290 * Check, if the block is bad.
292 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
294 struct mtd_info *mtd = nand_to_mtd(chip);
295 int page, page_end, res;
/* Some chips keep the marker in the last page of the block. */
298 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
299 ofs += mtd->erasesize - mtd->writesize;
301 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
/* Optionally scan a second page for the marker. */
302 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
304 for (; page < page_end; page++) {
305 res = chip->ecc.read_oob(chip, page);
/* Marker byte lands in the OOB buffer at badblockpos. */
309 bad = chip->oob_poi[chip->badblockpos];
/* Fast path: with 8 badblockbits a plain comparison suffices
 * (the comparison line itself is elided in this excerpt). */
311 if (likely(chip->badblockbits == 8))
/* Otherwise the block is bad when too few marker bits survive. */
314 res = hweight8(bad) < chip->badblockbits;
322 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
324 if (chip->legacy.block_bad)
325 return chip->legacy.block_bad(chip, ofs);
327 return nand_block_bad(chip, ofs);
331 * nand_get_device - [GENERIC] Get chip for selected access
332 * @chip: NAND chip structure
334 * Lock the device and its controller for exclusive access
336 * Return: -EBUSY if the chip has been suspended, 0 otherwise
338 static int nand_get_device(struct nand_chip *chip)
340 mutex_lock(&chip->lock);
/* A suspended chip must not be touched; back out with -EBUSY. */
341 if (chip->suspended) {
342 mutex_unlock(&chip->lock);
/* Chip lock held; now serialize against the shared controller. */
345 mutex_lock(&chip->controller->lock);
351 * nand_check_wp - [GENERIC] check if the chip is write protected
352 * @chip: NAND chip object
354 * Check, if the device is write protected. The function expects, that the
355 * device is already selected.
357 static int nand_check_wp(struct nand_chip *chip)
362 /* Broken xD cards report WP despite being writable */
363 if (chip->options & NAND_BROKEN_XD)
366 /* Check the WP bit */
367 ret = nand_status_op(chip, &status);
/* WP bit set in the status register means the chip is writable,
 * so return 0 ("not protected") in that case, 1 otherwise. */
371 return status & NAND_STATUS_WP ? 0 : 1;
375 * nand_fill_oob - [INTERN] Transfer client buffer to oob
376 * @chip: NAND chip object
377 * @oob: oob data buffer
378 * @len: oob data write length
379 * @ops: oob ops structure
381 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
382 struct mtd_oob_ops *ops)
384 struct mtd_info *mtd = nand_to_mtd(chip);
388 * Initialise to all 0xFF, to avoid the possibility of left over OOB
389 * data from a previous OOB read.
391 memset(chip->oob_poi, 0xff, mtd->oobsize);
/* Dispatch on ops->mode (switch statement elided in this excerpt). */
395 case MTD_OPS_PLACE_OOB:
/* Raw/placed OOB: copy at the caller-supplied offset. */
397 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
400 case MTD_OPS_AUTO_OOB:
/* Auto placement: scatter the bytes into the free OOB regions. */
401 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
413 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
414 * @chip: NAND chip object
415 * @to: offset to write to
416 * @ops: oob operation description structure
418 * NAND write out-of-band.
420 static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
421 struct mtd_oob_ops *ops)
423 struct mtd_info *mtd = nand_to_mtd(chip);
424 int chipnr, page, status, len, ret;
426 pr_debug("%s: to = 0x%08x, len = %i\n",
427 __func__, (unsigned int)to, (int)ops->ooblen);
/* Usable OOB bytes per page for this access mode. */
429 len = mtd_oobavail(mtd, ops);
431 /* Do not allow write past end of page */
432 if ((ops->ooboffs + ops->ooblen) > len) {
433 pr_debug("%s: attempt to write past end of page\n",
/* Which die in a multi-die package the offset falls on. */
438 chipnr = (int)(to >> chip->chip_shift);
441 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
442 * of my DiskOnChip 2000 test units) will clear the whole data page too
443 * if we don't do this. I have no clue why, but I seem to have 'fixed'
444 * it in the doc2000 driver in August 1999. dwmw2.
446 ret = nand_reset(chip, chipnr);
450 nand_select_target(chip, chipnr);
452 /* Shift to get page */
453 page = (int)(to >> chip->page_shift);
455 /* Check, if it is write protected */
456 if (nand_check_wp(chip)) {
457 nand_deselect_target(chip);
461 /* Invalidate the page cache, if we write to the cached page */
462 if (page == chip->pagebuf)
/* Stage client OOB data into chip->oob_poi. */
465 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
467 if (ops->mode == MTD_OPS_RAW)
468 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
470 status = chip->ecc.write_oob(chip, page & chip->pagemask);
472 nand_deselect_target(chip);
477 ops->oobretlen = ops->ooblen;
483 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
484 * @chip: NAND chip object
485 * @ofs: offset from device start
487 * This is the default implementation, which can be overridden by a hardware
488 * specific driver. It provides the details for writing a bad block marker to a
491 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
493 struct mtd_info *mtd = nand_to_mtd(chip);
494 struct mtd_oob_ops ops;
/* Zero bytes: writing 0x00 over the 0xff marker flags the block bad. */
495 uint8_t buf[2] = { 0, 0 };
496 int ret = 0, res, i = 0;
498 memset(&ops, 0, sizeof(ops));
500 ops.ooboffs = chip->badblockpos;
/* On 16-bit chips the marker must start on an even byte and span
 * a full 16-bit word. */
501 if (chip->options & NAND_BUSWIDTH_16) {
502 ops.ooboffs &= ~0x01;
503 ops.len = ops.ooblen = 2;
505 ops.len = ops.ooblen = 1;
507 ops.mode = MTD_OPS_PLACE_OOB;
509 /* Write to first/last page(s) if necessary */
510 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
511 ofs += mtd->erasesize - mtd->writesize;
513 res = nand_do_write_oob(chip, ofs, &ops);
518 ofs += mtd->writesize;
/* Repeat for a second page when the chip scans two pages. */
519 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
525 * nand_markbad_bbm - mark a block by updating the BBM
526 * @chip: NAND chip object
527 * @ofs: offset of the block to mark bad
529 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
531 if (chip->legacy.block_markbad)
532 return chip->legacy.block_markbad(chip, ofs);
534 return nand_default_block_markbad(chip, ofs);
538 * nand_block_markbad_lowlevel - mark a block bad
539 * @chip: NAND chip object
540 * @ofs: offset from device start
542 * This function performs the generic NAND bad block marking steps (i.e., bad
543 * block table(s) and/or marker(s)). We only allow the hardware driver to
544 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
546 * We try operations in the following order:
548 * (1) erase the affected block, to allow OOB marker to be written cleanly
549 * (2) write bad block marker to OOB area of affected block (unless flag
550 * NAND_BBT_NO_OOB_BBM is present)
553 * Note that we retain the first error encountered in (2) or (3), finish the
554 * procedures, and dump the error in the end.
556 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
558 struct mtd_info *mtd = nand_to_mtd(chip);
/* Skip OOB marking entirely when the flash forbids OOB BBM writes. */
561 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
562 struct erase_info einfo;
564 /* Attempt erase before marking OOB */
565 memset(&einfo, 0, sizeof(einfo));
567 einfo.len = 1ULL << chip->phys_erase_shift;
/* Best effort: the erase result is deliberately ignored. */
568 nand_erase_nand(chip, &einfo, 0);
570 /* Write bad block marker to OOB */
571 ret = nand_get_device(chip);
575 ret = nand_markbad_bbm(chip, ofs);
576 nand_release_device(chip);
579 /* Mark block bad in BBT */
581 res = nand_markbad_bbt(chip, ofs);
/* Account the newly bad block in the MTD ECC statistics. */
587 mtd->ecc_stats.badblocks++;
593 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
594 * @mtd: MTD device structure
595 * @ofs: offset from device start
597 * Check if the block is marked as reserved.
599 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
601 struct nand_chip *chip = mtd_to_nand(mtd);
/* NOTE(review): the "no BBT present" early return is elided in this
 * excerpt — confirm against the full source. */
605 /* Return info from the table */
606 return nand_isreserved_bbt(chip, ofs);
610 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
611 * @chip: NAND chip object
612 * @ofs: offset from device start
613 * @allowbbt: 1, if its allowed to access the bbt area
615 * Check, if the block is bad. Either by reading the bad block table or
616 * calling of the scan function.
618 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
620 /* Return info from the table */
/* NOTE(review): the BBT-presence condition guarding this call is
 * elided in this excerpt. */
622 return nand_isbad_bbt(chip, ofs, allowbbt);
/* No BBT: fall back to scanning the on-flash bad block marker. */
624 return nand_isbad_bbm(chip, ofs);
628 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
629 * @chip: NAND chip structure
630 * @timeout_ms: Timeout in ms
632 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
633 * If that does not happen within the specified timeout, -ETIMEDOUT is
636 * This helper is intended to be used when the controller does not have access
637 * to the NAND R/B pin.
639 * Be aware that calling this helper from an ->exec_op() implementation means
640 * ->exec_op() must be re-entrant.
642 * Return 0 if the NAND chip is ready, a negative error otherwise.
644 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
646 const struct nand_sdr_timings *timings;
/* Software polling only makes sense on ->exec_op() controllers. */
650 if (!nand_has_exec_op(chip))
653 /* Wait tWB before polling the STATUS reg. */
654 timings = nand_get_sdr_timings(&chip->data_interface);
655 ndelay(PSEC_TO_NSEC(timings->tWB_max));
657 ret = nand_status_op(chip, NULL);
/* Reuse timeout_ms to hold the absolute jiffies deadline. */
661 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
663 ret = nand_read_data_op(chip, &status, sizeof(status), true);
667 if (status & NAND_STATUS_READY)
671 * Typical lowest execution time for a tR on most NANDs is 10us,
672 * use this as polling delay before doing something smarter (ie.
673 * deriving a delay from the timeout value, timeout_ms/ratio).
676 } while (time_before(jiffies, timeout_ms));
679 * We have to exit READ_STATUS mode in order to read real data on the
680 * bus in case the WAITRDY instruction is preceding a DATA_IN
683 nand_exit_status_op(chip);
/* Report based on the last status sample taken inside the loop. */
688 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
690 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
693 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
694 * @chip: NAND chip structure
695 * @gpiod: GPIO descriptor of R/B pin
696 * @timeout_ms: Timeout in ms
698 * Poll the R/B GPIO pin until it becomes ready. If that does not happen
699 * within the specified timeout, -ETIMEDOUT is returned.
701 * This helper is intended to be used when the controller has access to the
702 * NAND R/B pin over GPIO.
704 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
706 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
707 unsigned long timeout_ms)
709 /* Wait until R/B pin indicates chip is ready or timeout occurs */
/* Reuse timeout_ms to hold the absolute jiffies deadline. */
710 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
712 if (gpiod_get_value_cansleep(gpiod))
716 } while (time_before(jiffies, timeout_ms));
/* Final re-sample: the pin may have gone ready right at the deadline. */
718 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
720 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
723 * panic_nand_wait - [GENERIC] wait until the command is done
724 * @chip: NAND chip structure
727 * Wait for command done. This is a helper function for nand_wait used when
728 * we are in interrupt context. May happen when in panic and trying to write
729 * an oops through mtdoops.
731 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
/* Busy-poll: no sleeping is possible in panic/interrupt context. */
734 for (i = 0; i < timeo; i++) {
735 if (chip->legacy.dev_ready) {
736 if (chip->legacy.dev_ready(chip))
/* No dev_ready hook: read the STATUS register instead. */
742 ret = nand_read_data_op(chip, &status, sizeof(status),
747 if (status & NAND_STATUS_READY)
754 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
756 return (chip->parameters.supports_set_get_features &&
757 test_bit(addr, chip->parameters.get_feature_list));
760 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
762 return (chip->parameters.supports_set_get_features &&
763 test_bit(addr, chip->parameters.set_feature_list));
767 * nand_reset_data_interface - Reset data interface and timings
768 * @chip: The NAND chip
769 * @chipnr: Internal die id
771 * Reset the Data interface and timings to ONFI mode 0.
773 * Returns 0 for success or negative error code otherwise.
775 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
/* Nothing to do on controllers without a setup_data_interface hook. */
779 if (!nand_has_setup_data_iface(chip))
783 * The ONFI specification says:
785 * To transition from NV-DDR or NV-DDR2 to the SDR data
786 * interface, the host shall use the Reset (FFh) command
787 * using SDR timing mode 0. A device in any timing mode is
788 * required to recognize Reset (FFh) command issued in SDR
792 * Configure the data interface in SDR mode and set the
793 * timings to timing mode 0.
796 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
797 ret = chip->controller->ops->setup_data_interface(chip, chipnr,
798 &chip->data_interface);
800 pr_err("Failed to configure data interface to SDR timing mode 0\n");
806 * nand_setup_data_interface - Setup the best data interface and timings
807 * @chip: The NAND chip
808 * @chipnr: Internal die id
810 * Find and configure the best data interface and NAND timings supported by
811 * the chip and the driver.
812 * First tries to retrieve supported timing modes from ONFI information,
813 * and if the NAND chip does not support ONFI, relies on the
814 * ->onfi_timing_mode_default specified in the nand_ids table.
816 * Returns 0 for success or negative error code otherwise.
818 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
/* ONFI SET FEATURES payload: first byte carries the timing mode. */
820 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
821 chip->onfi_timing_mode_default,
825 if (!nand_has_setup_data_iface(chip))
828 /* Change the mode on the chip side (if supported by the NAND chip) */
829 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
830 nand_select_target(chip, chipnr);
831 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
833 nand_deselect_target(chip);
838 /* Change the mode on the controller side */
839 ret = chip->controller->ops->setup_data_interface(chip, chipnr,
840 &chip->data_interface);
844 /* Check the mode has been accepted by the chip, if supported */
845 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
/* Read the mode back so we can compare it against what was set. */
848 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
849 nand_select_target(chip, chipnr);
850 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
852 nand_deselect_target(chip);
856 if (tmode_param[0] != chip->onfi_timing_mode_default) {
857 pr_warn("timing mode %d not acknowledged by the NAND chip\n",
858 chip->onfi_timing_mode_default);
866 * Fallback to mode 0 if the chip explicitly did not ack the chosen
/* Error path: undo the timing change on both chip and controller. */
869 nand_reset_data_interface(chip, chipnr);
870 nand_select_target(chip, chipnr);
872 nand_deselect_target(chip);
878 * nand_init_data_interface - find the best data interface and timings
879 * @chip: The NAND chip
881 * Find the best data interface and NAND timings supported by the chip
883 * First tries to retrieve supported timing modes from ONFI information,
884 * and if the NAND chip does not support ONFI, relies on the
885 * ->onfi_timing_mode_default specified in the nand_ids table. After this
886 * function nand_chip->data_interface is initialized with the best timing mode
889 * Returns 0 for success or negative error code otherwise.
891 static int nand_init_data_interface(struct nand_chip *chip)
893 int modes, mode, ret;
895 if (!nand_has_setup_data_iface(chip))
899 * First try to identify the best timings from ONFI parameters and
900 * if the NAND does not support ONFI, fallback to the default ONFI
903 if (chip->parameters.onfi) {
/* ONFI chips advertise supported modes as a bitmask. */
904 modes = chip->parameters.onfi->async_timing_mode;
906 if (!chip->onfi_timing_mode_default)
/* Non-ONFI: allow every mode up to the nand_ids default. */
909 modes = GENMASK(chip->onfi_timing_mode_default, 0);
/* Walk from the fastest advertised mode down to mode 0. */
912 for (mode = fls(modes) - 1; mode >= 0; mode--) {
913 ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
918 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
919 * controller supports the requested timings.
921 ret = chip->controller->ops->setup_data_interface(chip,
922 NAND_DATA_IFACE_CHECK_ONLY,
923 &chip->data_interface);
/* First mode the controller accepts becomes the default. */
925 chip->onfi_timing_mode_default = mode;
934 * nand_fill_column_cycles - fill the column cycles of an address
935 * @chip: The NAND chip
936 * @addrs: Array of address cycles to fill
937 * @offset_in_page: The offset in the page
939 * Fills the first or the first two bytes of the @addrs field depending
940 * on the NAND bus width and the page size.
942 * Returns the number of cycles needed to encode the column, or a negative
943 * error code in case one of the arguments is invalid.
945 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
946 unsigned int offset_in_page)
948 struct mtd_info *mtd = nand_to_mtd(chip);
950 /* Make sure the offset is less than the actual page size. */
951 if (offset_in_page > mtd->writesize + mtd->oobsize)
955 * On small page NANDs, there's a dedicated command to access the OOB
956 * area, and the column address is relative to the start of the OOB
957 * area, not the start of the page. Adjust the address accordingly.
959 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
960 offset_in_page -= mtd->writesize;
963 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
964 * wide, then it must be divided by 2.
966 if (chip->options & NAND_BUSWIDTH_16) {
/* An odd byte offset cannot be represented on a 16-bit bus. */
967 if (WARN_ON(offset_in_page % 2))
973 addrs[0] = offset_in_page;
976 * Small page NANDs use 1 cycle for the columns, while large page NANDs
979 if (mtd->writesize <= 512)
982 addrs[1] = offset_in_page >> 8;
/*
 * nand_sp_exec_read_page_op - issue a READ PAGE on a small-page chip
 * through ->exec_op(): READ0/READ1/READOOB opcode, 3-4 address cycles,
 * tR wait, then optional DATA_IN.
 */
987 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
988 unsigned int offset_in_page, void *buf,
991 struct mtd_info *mtd = nand_to_mtd(chip);
992 const struct nand_sdr_timings *sdr =
993 nand_get_sdr_timings(&chip->data_interface);
995 struct nand_op_instr instrs[] = {
996 NAND_OP_CMD(NAND_CMD_READ0, 0),
997 NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
998 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
999 PSEC_TO_NSEC(sdr->tRR_min)),
1000 NAND_OP_DATA_IN(len, buf, 0),
1002 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1005 /* Drop the DATA_IN instruction if len is set to 0. */
/* Small-page chips select the page region via the read opcode:
 * READOOB for the OOB area, READ1 for the second 256-byte half. */
1009 if (offset_in_page >= mtd->writesize)
1010 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1011 else if (offset_in_page >= 256 &&
1012 !(chip->options & NAND_BUSWIDTH_16))
1013 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1015 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
/* Row address: 2 cycles, plus a third on large-capacity chips. */
1020 addrs[2] = page >> 8;
1022 if (chip->options & NAND_ROW_ADDR_3) {
1023 addrs[3] = page >> 16;
1024 instrs[1].ctx.addr.naddrs++;
1027 return nand_exec_op(chip, &op);
/*
 * nand_lp_exec_read_page_op - issue a READ PAGE on a large-page chip
 * through ->exec_op(): READ0, 4-5 address cycles, READSTART, tR wait,
 * then optional DATA_IN.
 */
1030 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1031 unsigned int offset_in_page, void *buf,
1034 const struct nand_sdr_timings *sdr =
1035 nand_get_sdr_timings(&chip->data_interface);
1037 struct nand_op_instr instrs[] = {
1038 NAND_OP_CMD(NAND_CMD_READ0, 0),
1039 NAND_OP_ADDR(4, addrs, 0),
1040 NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1041 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1042 PSEC_TO_NSEC(sdr->tRR_min)),
1043 NAND_OP_DATA_IN(len, buf, 0),
1045 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1048 /* Drop the DATA_IN instruction if len is set to 0. */
/* Two column cycles, then the row address. */
1052 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1057 addrs[3] = page >> 8;
/* Large-capacity chips need a third row-address cycle. */
1059 if (chip->options & NAND_ROW_ADDR_3) {
1060 addrs[4] = page >> 16;
1061 instrs[1].ctx.addr.naddrs++;
1064 return nand_exec_op(chip, &op);
1068 * nand_read_page_op - Do a READ PAGE operation
1069 * @chip: The NAND chip
1070 * @page: page to read
1071 * @offset_in_page: offset within the page
1072 * @buf: buffer used to store the data
1073 * @len: length of the buffer
1075 * This function issues a READ PAGE operation.
1076 * This function does not select/unselect the CS line.
1078 * Returns 0 on success, a negative error code otherwise.
1080 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1081 unsigned int offset_in_page, void *buf, unsigned int len)
1083 struct mtd_info *mtd = nand_to_mtd(chip);
/* Reject reads that would run past the page + OOB area. */
1088 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
/* Modern path: dispatch to the small/large-page exec_op helper. */
1091 if (nand_has_exec_op(chip)) {
1092 if (mtd->writesize > 512)
1093 return nand_lp_exec_read_page_op(chip, page,
1094 offset_in_page, buf,
1097 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
/* Legacy path: use the driver-provided cmdfunc/read_buf hooks. */
1101 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1103 chip->legacy.read_buf(chip, buf, len);
1107 EXPORT_SYMBOL_GPL(nand_read_page_op);
1110 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1111 * @chip: The NAND chip
1112 * @page: parameter page to read
1113 * @buf: buffer used to store the data
1114 * @len: length of the buffer
1116 * This function issues a READ PARAMETER PAGE operation.
1117 * This function does not select/unselect the CS line.
1119 * Returns 0 on success, a negative error code otherwise.
1121 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1130 if (nand_has_exec_op(chip)) {
1131 const struct nand_sdr_timings *sdr =
1132 nand_get_sdr_timings(&chip->data_interface);
/* PARAM command takes a single address cycle (the page id). */
1133 struct nand_op_instr instrs[] = {
1134 NAND_OP_CMD(NAND_CMD_PARAM, 0),
1135 NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1136 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1137 PSEC_TO_NSEC(sdr->tRR_min)),
/* Parameter pages are always read 8 bits at a time. */
1138 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1140 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1142 /* Drop the DATA_IN instruction if len is set to 0. */
1146 return nand_exec_op(chip, &op);
/* Legacy path: byte-by-byte read through read_byte(). */
1149 chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1150 for (i = 0; i < len; i++)
1151 p[i] = chip->legacy.read_byte(chip);
1157 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1158 * @chip: The NAND chip
1159 * @offset_in_page: offset within the page
1160 * @buf: buffer used to store the data
1161 * @len: length of the buffer
1162 * @force_8bit: force 8-bit bus access
1164 * This function issues a CHANGE READ COLUMN operation.
1165 * This function does not select/unselect the CS line.
1167 * Returns 0 on success, a negative error code otherwise.
1169 int nand_change_read_column_op(struct nand_chip *chip,
1170 unsigned int offset_in_page, void *buf,
1171 unsigned int len, bool force_8bit)
1173 struct mtd_info *mtd = nand_to_mtd(chip);
/* Reject reads that would run past the page + OOB area. */
1178 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1181 /* Small page NANDs do not support column change. */
1182 if (mtd->writesize <= 512)
1185 if (nand_has_exec_op(chip)) {
1186 const struct nand_sdr_timings *sdr =
1187 nand_get_sdr_timings(&chip->data_interface);
/* RNDOUT + 2 column cycles + RNDOUTSTART, then the data. */
1189 struct nand_op_instr instrs[] = {
1190 NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1191 NAND_OP_ADDR(2, addrs, 0),
1192 NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1193 PSEC_TO_NSEC(sdr->tCCS_min)),
1194 NAND_OP_DATA_IN(len, buf, 0),
1196 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1199 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1203 /* Drop the DATA_IN instruction if len is set to 0. */
/* Honor the caller's request for 8-bit-only bus accesses. */
1207 instrs[3].ctx.data.force_8bit = force_8bit;
1209 return nand_exec_op(chip, &op);
/* Legacy path. */
1212 chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1214 chip->legacy.read_buf(chip, buf, len);
1218 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1221 * nand_read_oob_op - Do a READ OOB operation
1222 * @chip: The NAND chip
1223 * @page: page to read
1224 * @offset_in_oob: offset within the OOB area
1225 * @buf: buffer used to store the data
1226 * @len: length of the buffer
1228 * This function issues a READ OOB operation.
1229 * This function does not select/unselect the CS line.
1231 * Returns 0 on success, a negative error code otherwise.
1233 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1234 unsigned int offset_in_oob, void *buf, unsigned int len)
1236 struct mtd_info *mtd = nand_to_mtd(chip);
/* Reject reads that would run past the OOB area. */
1241 if (offset_in_oob + len > mtd->oobsize)
/* exec_op path: an OOB read is a page read starting past writesize. */
1244 if (nand_has_exec_op(chip))
1245 return nand_read_page_op(chip, page,
1246 mtd->writesize + offset_in_oob,
/* Legacy path: dedicated READOOB command. */
1249 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1251 chip->legacy.read_buf(chip, buf, len);
1255 EXPORT_SYMBOL_GPL(nand_read_oob_op);
/*
 * nand_exec_prog_page_op - build and run a PROG PAGE sequence through
 * ->exec_op(): (optional READ0/READ1/READOOB region select on
 * small-page chips) SEQIN + address cycles + DATA_OUT, and, when @prog
 * is true, PAGEPROG + tPROG wait followed by a status read.
 */
1257 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1258 unsigned int offset_in_page, const void *buf,
1259 unsigned int len, bool prog)
1261 struct mtd_info *mtd = nand_to_mtd(chip);
1262 const struct nand_sdr_timings *sdr =
1263 nand_get_sdr_timings(&chip->data_interface);
1265 struct nand_op_instr instrs[] = {
1267 * The first instruction will be dropped if we're dealing
1268 * with a large page NAND and adjusted if we're dealing
1269 * with a small page NAND and the page offset is > 255.
1271 NAND_OP_CMD(NAND_CMD_READ0, 0),
1272 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1273 NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1274 NAND_OP_DATA_OUT(len, buf, 0),
1275 NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1276 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1278 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1279 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
/* Append row-address cycles after the column cycles. */
1286 addrs[naddrs++] = page;
1287 addrs[naddrs++] = page >> 8;
1288 if (chip->options & NAND_ROW_ADDR_3)
1289 addrs[naddrs++] = page >> 16;
1291 instrs[2].ctx.addr.naddrs = naddrs;
1293 /* Drop the last two instructions if we're not programming the page. */
1296 /* Also drop the DATA_OUT instruction if empty. */
1301 if (mtd->writesize <= 512) {
1303 * Small pages need some more tweaking: we have to adjust the
1304 * first instruction depending on the page offset we're trying
1307 if (offset_in_page >= mtd->writesize)
1308 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1309 else if (offset_in_page >= 256 &&
1310 !(chip->options & NAND_BUSWIDTH_16))
1311 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1314 * Drop the first command if we're dealing with a large page
1321 ret = nand_exec_op(chip, &op);
/* When programming, read the chip status to report WRITE failures. */
1325 ret = nand_status_op(chip, &status);
1333 * nand_prog_page_begin_op - starts a PROG PAGE operation
1334 * @chip: The NAND chip
1335 * @page: page to write
1336 * @offset_in_page: offset within the page
1337 * @buf: buffer containing the data to write to the page
1338 * @len: length of the buffer
1340 * This function issues the first half of a PROG PAGE operation.
1341 * This function does not select/unselect the CS line.
1343 * Returns 0 on success, a negative error code otherwise.
1345 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1346 unsigned int offset_in_page, const void *buf,
1349 struct mtd_info *mtd = nand_to_mtd(chip);
/* Reject writes that would run past the page + OOB area. */
1354 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
/* exec_op path: prog=false means SEQIN+data only, no PAGEPROG yet. */
1357 if (nand_has_exec_op(chip))
1358 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
/* Legacy path. */
1361 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1364 chip->legacy.write_buf(chip, buf, len);
1368 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1371 * nand_prog_page_end_op - ends a PROG PAGE operation
1372 * @chip: The NAND chip
1374 * This function issues the second half of a PROG PAGE operation.
1375 * This function does not select/unselect the CS line.
1377 * Returns 0 on success, a negative error code otherwise.
/*
 * Second half of PROG PAGE: issue PAGEPROG, wait for the chip to become
 * ready (tPROG), then read STATUS to learn whether programming failed.
 */
1379 int nand_prog_page_end_op(struct nand_chip *chip)
1384 if (nand_has_exec_op(chip)) {
1385 const struct nand_sdr_timings *sdr =
1386 nand_get_sdr_timings(&chip->data_interface);
1387 struct nand_op_instr instrs[] = {
1388 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1389 PSEC_TO_NSEC(sdr->tWB_max)),
1390 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1392 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1394 ret = nand_exec_op(chip, &op);
/* Fetch the status byte so the FAIL bit can be checked below. */
1398 ret = nand_status_op(chip, &status);
/* Legacy path: waitfunc() returns the status byte directly. */
1402 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1403 ret = chip->legacy.waitfunc(chip);
1410 if (status & NAND_STATUS_FAIL)
1415 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1418 * nand_prog_page_op - Do a full PROG PAGE operation
1419 * @chip: The NAND chip
1420 * @page: page to write
1421 * @offset_in_page: offset within the page
1422 * @buf: buffer containing the data to write to the page
1423 * @len: length of the buffer
1425 * This function issues a full PROG PAGE operation.
1426 * This function does not select/unselect the CS line.
1428 * Returns 0 on success, a negative error code otherwise.
/*
 * Full PROG PAGE: bounds-check, then either run the whole sequence through
 * exec_op or do SEQIN + data + PAGEPROG + waitfunc on the legacy path.
 * Both paths end with a NAND_STATUS_FAIL check on the returned status.
 */
1430 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1431 unsigned int offset_in_page, const void *buf,
1434 struct mtd_info *mtd = nand_to_mtd(chip);
1440 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1443 if (nand_has_exec_op(chip)) {
1444 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1447 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1449 chip->legacy.write_buf(chip, buf, len);
1450 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1451 status = chip->legacy.waitfunc(chip);
1454 if (status & NAND_STATUS_FAIL)
1459 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1462 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1463 * @chip: The NAND chip
1464 * @offset_in_page: offset within the page
1465 * @buf: buffer containing the data to send to the NAND
1466 * @len: length of the buffer
1467 * @force_8bit: force 8-bit bus access
1469 * This function issues a CHANGE WRITE COLUMN operation.
1470 * This function does not select/unselect the CS line.
1472 * Returns 0 on success, a negative error code otherwise.
/*
 * CHANGE WRITE COLUMN (RNDIN): reposition the program pointer inside the
 * current page and optionally push more data. Rejected on small-page
 * devices, which have no column-change command.
 */
1474 int nand_change_write_column_op(struct nand_chip *chip,
1475 unsigned int offset_in_page,
1476 const void *buf, unsigned int len,
1479 struct mtd_info *mtd = nand_to_mtd(chip);
1484 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1487 /* Small page NANDs do not support column change. */
1488 if (mtd->writesize <= 512)
1491 if (nand_has_exec_op(chip)) {
1492 const struct nand_sdr_timings *sdr =
1493 nand_get_sdr_timings(&chip->data_interface);
1495 struct nand_op_instr instrs[] = {
1496 NAND_OP_CMD(NAND_CMD_RNDIN, 0),
/* RNDIN takes exactly two column-address cycles. */
1497 NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1498 NAND_OP_DATA_OUT(len, buf, 0),
1500 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1503 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1507 instrs[2].ctx.data.force_8bit = force_8bit;
1509 /* Drop the DATA_OUT instruction if len is set to 0. */
1513 return nand_exec_op(chip, &op);
1516 chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1518 chip->legacy.write_buf(chip, buf, len);
1522 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1525 * nand_readid_op - Do a READID operation
1526 * @chip: The NAND chip
1527 * @addr: address cycle to pass after the READID command
1528 * @buf: buffer used to store the ID
1529 * @len: length of the buffer
1531 * This function sends a READID command and reads back the ID returned by the
1533 * This function does not select/unselect the CS line.
1535 * Returns 0 on success, a negative error code otherwise.
/*
 * READID: send the command plus one address cycle, then read @len ID bytes.
 * The exec_op path forces 8-bit data-in since ID bytes are always 8-bit.
 */
1537 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1546 if (nand_has_exec_op(chip)) {
1547 const struct nand_sdr_timings *sdr =
1548 nand_get_sdr_timings(&chip->data_interface);
1549 struct nand_op_instr instrs[] = {
1550 NAND_OP_CMD(NAND_CMD_READID, 0),
1551 NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1552 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1554 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1556 /* Drop the DATA_IN instruction if len is set to 0. */
1560 return nand_exec_op(chip, &op);
/* Legacy path: byte-by-byte reads via read_byte(). */
1563 chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1565 for (i = 0; i < len; i++)
1566 id[i] = chip->legacy.read_byte(chip);
1570 EXPORT_SYMBOL_GPL(nand_readid_op);
1573 * nand_status_op - Do a STATUS operation
1574 * @chip: The NAND chip
1575 * @status: out variable to store the NAND status
1577 * This function sends a STATUS command and reads back the status returned by
1579 * This function does not select/unselect the CS line.
1581 * Returns 0 on success, a negative error code otherwise.
/*
 * STATUS: issue the command and read back a single 8-bit status byte
 * into @status (may be NULL-checked by missing lines in this extract).
 */
1583 int nand_status_op(struct nand_chip *chip, u8 *status)
1585 if (nand_has_exec_op(chip)) {
1586 const struct nand_sdr_timings *sdr =
1587 nand_get_sdr_timings(&chip->data_interface);
1588 struct nand_op_instr instrs[] = {
1589 NAND_OP_CMD(NAND_CMD_STATUS,
1590 PSEC_TO_NSEC(sdr->tADL_min)),
1591 NAND_OP_8BIT_DATA_IN(1, status, 0),
1593 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1598 return nand_exec_op(chip, &op);
1601 chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1603 *status = chip->legacy.read_byte(chip);
1607 EXPORT_SYMBOL_GPL(nand_status_op);
1610 * nand_exit_status_op - Exit a STATUS operation
1611 * @chip: The NAND chip
1613 * This function sends a READ0 command to cancel the effect of the STATUS
1614 * command to avoid reading only the status until a new read command is sent.
1616 * This function does not select/unselect the CS line.
1618 * Returns 0 on success, a negative error code otherwise.
/*
 * Leave STATUS mode by issuing READ0, so subsequent data reads return
 * array data again instead of the latched status byte.
 */
1620 int nand_exit_status_op(struct nand_chip *chip)
1622 if (nand_has_exec_op(chip)) {
1623 struct nand_op_instr instrs[] = {
1624 NAND_OP_CMD(NAND_CMD_READ0, 0),
1626 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1628 return nand_exec_op(chip, &op);
1631 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1637 * nand_erase_op - Do an erase operation
1638 * @chip: The NAND chip
1639 * @eraseblock: block to erase
1641 * This function sends an ERASE command and waits for the NAND to be ready
1643 * This function does not select/unselect the CS line.
1645 * Returns 0 on success, a negative error code otherwise.
/*
 * Block erase: convert the eraseblock index to its first page number,
 * send ERASE1 + row address cycles + ERASE2, wait up to tBERS for the
 * chip, then read STATUS to detect erase failure.
 */
1647 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1649 unsigned int page = eraseblock <<
1650 (chip->phys_erase_shift - chip->page_shift);
1654 if (nand_has_exec_op(chip)) {
1655 const struct nand_sdr_timings *sdr =
1656 nand_get_sdr_timings(&chip->data_interface);
1657 u8 addrs[3] = { page, page >> 8, page >> 16 };
1658 struct nand_op_instr instrs[] = {
1659 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1660 NAND_OP_ADDR(2, addrs, 0),
1661 NAND_OP_CMD(NAND_CMD_ERASE2,
/*
 * NOTE(review): tWB_max is converted with PSEC_TO_MSEC here, while
 * every other use in this file (e.g. PAGEPROG, RESET) uses
 * PSEC_TO_NSEC. Looks like a unit mix-up — verify against upstream.
 */
1662 PSEC_TO_MSEC(sdr->tWB_max)),
1663 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1665 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Chips needing a third row cycle get one more address byte. */
1667 if (chip->options & NAND_ROW_ADDR_3)
1668 instrs[1].ctx.addr.naddrs++;
1670 ret = nand_exec_op(chip, &op);
1674 ret = nand_status_op(chip, &status);
1678 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1679 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1681 ret = chip->legacy.waitfunc(chip);
1688 if (status & NAND_STATUS_FAIL)
1693 EXPORT_SYMBOL_GPL(nand_erase_op);
1696 * nand_set_features_op - Do a SET FEATURES operation
1697 * @chip: The NAND chip
1698 * @feature: feature id
1699 * @data: 4 bytes of data
1701 * This function sends a SET FEATURES command and waits for the NAND to be
1702 * ready before returning.
1703 * This function does not select/unselect the CS line.
1705 * Returns 0 on success, a negative error code otherwise.
/*
 * SET FEATURES: command + one feature-address cycle + the fixed-size
 * (ONFI_SUBFEATURE_PARAM_LEN) parameter bytes, then wait up to tFEAT.
 * Legacy path writes the parameters byte-by-byte and checks waitfunc().
 */
1707 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1710 const u8 *params = data;
1713 if (nand_has_exec_op(chip)) {
1714 const struct nand_sdr_timings *sdr =
1715 nand_get_sdr_timings(&chip->data_interface);
1716 struct nand_op_instr instrs[] = {
1717 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1718 NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1719 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1720 PSEC_TO_NSEC(sdr->tWB_max)),
1721 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1723 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1725 return nand_exec_op(chip, &op);
1728 chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1729 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1730 chip->legacy.write_byte(chip, params[i]);
1732 ret = chip->legacy.waitfunc(chip);
/* waitfunc() returns the status byte; FAIL means the chip rejected it. */
1736 if (ret & NAND_STATUS_FAIL)
1743 * nand_get_features_op - Do a GET FEATURES operation
1744 * @chip: The NAND chip
1745 * @feature: feature id
1746 * @data: 4 bytes of data
1748 * This function sends a GET FEATURES command and waits for the NAND to be
1749 * ready before returning.
1750 * This function does not select/unselect the CS line.
1752 * Returns 0 on success, a negative error code otherwise.
/*
 * GET FEATURES: command + feature-address cycle, wait for ready (tFEAT,
 * honouring tRR before the first read), then read the parameter bytes.
 */
1754 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1760 if (nand_has_exec_op(chip)) {
1761 const struct nand_sdr_timings *sdr =
1762 nand_get_sdr_timings(&chip->data_interface);
1763 struct nand_op_instr instrs[] = {
1764 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1765 NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
1766 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
1767 PSEC_TO_NSEC(sdr->tRR_min)),
1768 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1771 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1773 return nand_exec_op(chip, &op);
1776 chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1777 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1778 params[i] = chip->legacy.read_byte(chip);
/*
 * Wait for the chip's ready/busy signal: a single WAIT_RDY instruction on
 * the exec_op path, or a fixed udelay / nand_wait_ready() on the legacy
 * path when no dev_ready() hook exists.
 * NOTE(review): timeout_ms and delay_ns are already in ms/ns, yet they are
 * passed through PSEC_TO_MSEC/PSEC_TO_NSEC (picosecond converters) — this
 * looks like a double unit conversion; verify against upstream.
 */
1783 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1784 unsigned int delay_ns)
1786 if (nand_has_exec_op(chip)) {
1787 struct nand_op_instr instrs[] = {
1788 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1789 PSEC_TO_NSEC(delay_ns)),
1791 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1793 return nand_exec_op(chip, &op);
1796 /* Apply delay or wait for ready/busy pin */
1797 if (!chip->legacy.dev_ready)
1798 udelay(chip->legacy.chip_delay);
1800 nand_wait_ready(chip);
1806 * nand_reset_op - Do a reset operation
1807 * @chip: The NAND chip
1809 * This function sends a RESET command and waits for the NAND to be ready
1811 * This function does not select/unselect the CS line.
1813 * Returns 0 on success, a negative error code otherwise.
/*
 * RESET: send the command and wait up to tRST for the chip to come back
 * ready. The legacy path relies on cmdfunc() to handle the wait itself.
 */
1815 int nand_reset_op(struct nand_chip *chip)
1817 if (nand_has_exec_op(chip)) {
1818 const struct nand_sdr_timings *sdr =
1819 nand_get_sdr_timings(&chip->data_interface);
1820 struct nand_op_instr instrs[] = {
1821 NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
1822 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
1824 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1826 return nand_exec_op(chip, &op);
1829 chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
1833 EXPORT_SYMBOL_GPL(nand_reset_op);
1836 * nand_read_data_op - Read data from the NAND
1837 * @chip: The NAND chip
1838 * @buf: buffer used to store the data
1839 * @len: length of the buffer
1840 * @force_8bit: force 8-bit bus access
1842 * This function does a raw data read on the bus. Usually used after launching
1843 * another NAND operation like nand_read_page_op().
1844 * This function does not select/unselect the CS line.
1846 * Returns 0 on success, a negative error code otherwise.
/*
 * Raw bus read of @len bytes into @buf. On the legacy path, force_8bit
 * falls back to per-byte read_byte() (missing branch lines in this
 * extract), otherwise read_buf() does a bulk transfer.
 */
1848 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1854 if (nand_has_exec_op(chip)) {
1855 struct nand_op_instr instrs[] = {
1856 NAND_OP_DATA_IN(len, buf, 0),
1858 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1860 instrs[0].ctx.data.force_8bit = force_8bit;
1862 return nand_exec_op(chip, &op);
1869 for (i = 0; i < len; i++)
1870 p[i] = chip->legacy.read_byte(chip);
1872 chip->legacy.read_buf(chip, buf, len);
1877 EXPORT_SYMBOL_GPL(nand_read_data_op);
1880 * nand_write_data_op - Write data to the NAND
1881 * @chip: The NAND chip
1882 * @buf: buffer containing the data to send on the bus
1883 * @len: length of the buffer
1884 * @force_8bit: force 8-bit bus access
1886 * This function does a raw data write on the bus. Usually used after launching
1887 * another NAND operation like nand_write_page_begin_op().
1888 * This function does not select/unselect the CS line.
1890 * Returns 0 on success, a negative error code otherwise.
/*
 * Raw bus write of @len bytes from @buf — mirror of nand_read_data_op():
 * single DATA_OUT on the exec_op path, write_byte()/write_buf() legacy
 * fallback depending on force_8bit.
 */
1892 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1893 unsigned int len, bool force_8bit)
1898 if (nand_has_exec_op(chip)) {
1899 struct nand_op_instr instrs[] = {
1900 NAND_OP_DATA_OUT(len, buf, 0),
1902 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1904 instrs[0].ctx.data.force_8bit = force_8bit;
1906 return nand_exec_op(chip, &op);
1913 for (i = 0; i < len; i++)
1914 chip->legacy.write_byte(chip, p[i]);
1916 chip->legacy.write_buf(chip, buf, len);
1921 EXPORT_SYMBOL_GPL(nand_write_data_op);
1924 * struct nand_op_parser_ctx - Context used by the parser
1925 * @instrs: array of all the instructions that must be addressed
1926 * @ninstrs: length of the @instrs array
1927 * @subop: Sub-operation to be passed to the NAND controller
1929 * This structure is used by the core to split NAND operations into
1930 * sub-operations that can be handled by the NAND controller.
/* Parser state: the full instruction list plus the current sub-operation
 * window being matched against controller patterns. */
1932 struct nand_op_parser_ctx {
1933 const struct nand_op_instr *instrs;
1934 unsigned int ninstrs;
1935 struct nand_subop subop;
1939 * nand_op_parser_must_split_instr - Checks if an instruction must be split
1940 * @pat: the parser pattern element that matches @instr
1941 * @instr: pointer to the instruction to check
1942 * @start_offset: this is an in/out parameter. If @instr has already been
1943 * split, then @start_offset is the offset from which to start
1944 * (either an address cycle or an offset in the data buffer).
1945 * Conversely, if the function returns true (ie. instr must be
1946 * split), this parameter is updated to point to the first
1947 * data/address cycle that has not been taken care of.
1949 * Some NAND controllers are limited and cannot send X address cycles with a
1950 * unique operation, or cannot read/write more than Y bytes at the same time.
1951 * In this case, split the instruction that does not fit in a single
1952 * controller-operation into two or more chunks.
1954 * Returns true if the instruction must be split, false otherwise.
1955 * The @start_offset parameter is also updated to the offset at which the next
1956 * bundle of instruction must start (if an address or a data instruction).
/*
 * Decide whether @instr exceeds the pattern element's maxcycles/maxlen
 * constraint starting at *start_offset; if so, advance *start_offset by
 * the amount this chunk consumes (caller resumes from there).
 * Only ADDR and DATA_IN/OUT instructions are splittable; a zero
 * maxcycles/maxlen means "no limit".
 */
1959 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1960 const struct nand_op_instr *instr,
1961 unsigned int *start_offset)
1963 switch (pat->type) {
1964 case NAND_OP_ADDR_INSTR:
1965 if (!pat->ctx.addr.maxcycles)
1968 if (instr->ctx.addr.naddrs - *start_offset >
1969 pat->ctx.addr.maxcycles) {
1970 *start_offset += pat->ctx.addr.maxcycles;
1975 case NAND_OP_DATA_IN_INSTR:
1976 case NAND_OP_DATA_OUT_INSTR:
1977 if (!pat->ctx.data.maxlen)
1980 if (instr->ctx.data.len - *start_offset >
1981 pat->ctx.data.maxlen) {
1982 *start_offset += pat->ctx.data.maxlen;
1995 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
1996 * remaining in the parser context
1997 * @pat: the pattern to test
1998 * @ctx: the parser context structure to match with the pattern @pat
2000 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2001 * Returns true if this is the case, false otherwise. When true is returned,
2002 * @ctx->subop is updated with the set of instructions to be passed to the
2003 * controller driver.
/*
 * Try to match @pat against the instructions remaining in @ctx. Optional
 * pattern elements may be skipped; a mandatory mismatch fails the match.
 * On success ctx->subop is updated with the matched instruction window
 * (ninstrs and the split offsets).
 */
2006 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2007 struct nand_op_parser_ctx *ctx)
2009 unsigned int instr_offset = ctx->subop.first_instr_start_off;
2010 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2011 const struct nand_op_instr *instr = ctx->subop.instrs;
2012 unsigned int i, ninstrs;
2014 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2016 * The pattern instruction does not match the operation
2017 * instruction. If the instruction is marked optional in the
2018 * pattern definition, we skip the pattern element and continue
2019 * to the next one. If the element is mandatory, there's no
2020 * match and we can return false directly.
2022 if (instr->type != pat->elems[i].type) {
2023 if (!pat->elems[i].optional)
2030 * Now check the pattern element constraints. If the pattern is
2031 * not able to handle the whole instruction in a single step,
2032 * we have to split it.
2033 * The last_instr_end_off value comes back updated to point to
2034 * the position where we have to split the instruction (the
2035 * start of the next subop chunk).
2037 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2050 * This can happen if all instructions of a pattern are optional.
2051 * Still, if there's not at least one instruction handled by this
2052 * pattern, this is not a match, and we should try the next one (if
2059 * We had a match on the pattern head, but the pattern may be longer
2060 * than the instructions we're asked to execute. We need to make sure
2061 * there's no mandatory elements in the pattern tail.
2063 for (; i < pat->nelems; i++) {
2064 if (!pat->elems[i].optional)
2069 * We have a match: update the subop structure accordingly and return
2072 ctx->subop.ninstrs = ninstrs;
2073 ctx->subop.last_instr_end_off = instr_offset;
/*
 * Debug-only dump of the current sub-operation: prints each instruction,
 * marking the subop window between subop.instrs[0] and the last subop
 * instruction. Compiled to a stub when dynamic debug / DEBUG is off.
 */
2078 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2079 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2081 const struct nand_op_instr *instr;
2085 pr_debug("executing subop:\n");
2087 for (i = 0; i < ctx->ninstrs; i++) {
2088 instr = &ctx->instrs[i];
2090 if (instr == &ctx->subop.instrs[0])
2093 switch (instr->type) {
2094 case NAND_OP_CMD_INSTR:
2095 pr_debug("%sCMD [0x%02x]\n", prefix,
2096 instr->ctx.cmd.opcode);
2098 case NAND_OP_ADDR_INSTR:
/* %*ph prints at most 64 bytes, hence the naddrs clamp below. */
2099 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
2100 instr->ctx.addr.naddrs,
2101 instr->ctx.addr.naddrs < 64 ?
2102 instr->ctx.addr.naddrs : 64,
2103 instr->ctx.addr.addrs);
2105 case NAND_OP_DATA_IN_INSTR:
2106 pr_debug("%sDATA_IN [%d B%s]\n", prefix,
2107 instr->ctx.data.len,
2108 instr->ctx.data.force_8bit ?
2109 ", force 8-bit" : "");
2111 case NAND_OP_DATA_OUT_INSTR:
2112 pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
2113 instr->ctx.data.len,
2114 instr->ctx.data.force_8bit ?
2115 ", force 8-bit" : "");
2117 case NAND_OP_WAITRDY_INSTR:
2118 pr_debug("%sWAITRDY [max %d ms]\n", prefix,
2119 instr->ctx.waitrdy.timeout_ms);
2123 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
/* Stub used when debug output is compiled out. */
2128 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2135 * nand_op_parser_exec_op - exec_op parser
2136 * @chip: the NAND chip
2137 * @parser: patterns description provided by the controller driver
2138 * @op: the NAND operation to address
2139 * @check_only: when true, the function only checks if @op can be handled but
2140 * does not execute the operation
2142 * Helper function designed to ease integration of NAND controller drivers that
2143 * only support a limited set of instruction sequences. The supported sequences
2144 * are described in @parser, and the framework takes care of splitting @op into
2145 * multiple sub-operations (if required) and pass them back to the ->exec()
2146 * callback of the matching pattern if @check_only is set to false.
2148 * NAND controller drivers should call this function from their own ->exec_op()
2151 * Returns 0 on success, a negative error code otherwise. A failure can be
2152 * caused by an unsupported operation (none of the supported patterns is able
2153 * to handle the requested operation), or an error returned by one of the
2154 * matching pattern->exec() hook.
/*
 * Walk @op, repeatedly matching the remaining instructions against the
 * driver's patterns; on each match, trace the subop and (unless
 * check_only) run the pattern's ->exec() hook. Fails if no pattern
 * matches the head of the remaining instruction stream.
 */
2156 int nand_op_parser_exec_op(struct nand_chip *chip,
2157 const struct nand_op_parser *parser,
2158 const struct nand_operation *op, bool check_only)
2160 struct nand_op_parser_ctx ctx = {
2161 .subop.instrs = op->instrs,
2162 .instrs = op->instrs,
2163 .ninstrs = op->ninstrs,
2167 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2170 for (i = 0; i < parser->npatterns; i++) {
2171 const struct nand_op_parser_pattern *pattern;
2173 pattern = &parser->patterns[i];
2174 if (!nand_op_parser_match_pat(pattern, &ctx))
2177 nand_op_parser_trace(&ctx);
2182 ret = pattern->exec(chip, &ctx.subop);
/* Loop fell through every pattern: the op is unsupported. */
2189 if (i == parser->npatterns) {
2190 pr_debug("->exec_op() parser: pattern not found!\n");
2195 * Update the context structure by pointing to the start of the
2198 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
/* A split instruction is re-processed by the next subop. */
2199 if (ctx.subop.last_instr_end_off)
2200 ctx.subop.instrs -= 1;
2202 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2207 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
/* True iff @instr is a (non-NULL) DATA_IN or DATA_OUT instruction. */
2209 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2211 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2212 instr->type == NAND_OP_DATA_OUT_INSTR);
/* Bounds check: @instr_idx must index an instruction inside @subop. */
2215 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2216 unsigned int instr_idx)
2218 return subop && instr_idx < subop->ninstrs;
/*
 * Start offset of instruction @instr_idx inside its (possibly split)
 * instruction: only the first instruction of a subop carries a non-zero
 * start offset (first_instr_start_off); the guard lines for other
 * indices are missing from this extract.
 */
2221 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2222 unsigned int instr_idx)
2227 return subop->first_instr_start_off;
2231 * nand_subop_get_addr_start_off - Get the start offset in an address array
2232 * @subop: The entire sub-operation
2233 * @instr_idx: Index of the instruction inside the sub-operation
2235 * During driver development, one could be tempted to directly use the
2236 * ->addr.addrs field of address instructions. This is wrong as address
2237 * instructions might be split.
2239 * Given an address instruction, returns the offset of the first cycle to issue.
/*
 * For an ADDR instruction, return the first address cycle to issue,
 * accounting for earlier subops having consumed part of the cycle list.
 * WARNs (and the missing line presumably returns) on a non-ADDR index.
 */
2241 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2242 unsigned int instr_idx)
2244 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2245 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2248 return nand_subop_get_start_off(subop, instr_idx);
2250 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2253 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2254 * @subop: The entire sub-operation
2255 * @instr_idx: Index of the instruction inside the sub-operation
2257 * During driver development, one could be tempted to directly use the
2258 * ->addr->naddrs field of an address instruction. This is wrong as instructions
2261 * Given an address instruction, returns the number of address cycle to issue.
/*
 * Number of address cycles this subop must issue for @instr_idx:
 * end offset (either the split point stored in last_instr_end_off for
 * the subop's final instruction, or the full naddrs) minus start offset.
 */
2263 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2264 unsigned int instr_idx)
2266 int start_off, end_off;
2268 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2269 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2272 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2274 if (instr_idx == subop->ninstrs - 1 &&
2275 subop->last_instr_end_off)
2276 end_off = subop->last_instr_end_off;
2278 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2280 return end_off - start_off;
2282 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2285 * nand_subop_get_data_start_off - Get the start offset in a data array
2286 * @subop: The entire sub-operation
2287 * @instr_idx: Index of the instruction inside the sub-operation
2289 * During driver development, one could be tempted to directly use the
2290 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2291 * instructions might be split.
2293 * Given a data instruction, returns the offset to start from.
/*
 * For a DATA_IN/DATA_OUT instruction, return the buffer offset to start
 * from (non-zero when the data instruction was split across subops).
 */
2295 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2296 unsigned int instr_idx)
2298 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2299 !nand_instr_is_data(&subop->instrs[instr_idx])))
2302 return nand_subop_get_start_off(subop, instr_idx);
2304 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2307 * nand_subop_get_data_len - Get the number of bytes to retrieve
2308 * @subop: The entire sub-operation
2309 * @instr_idx: Index of the instruction inside the sub-operation
2311 * During driver development, one could be tempted to directly use the
2312 * ->data->len field of a data instruction. This is wrong as data instructions
2315 * Returns the length of the chunk of data to send/receive.
/*
 * Number of data bytes this subop must transfer for @instr_idx — same
 * end-minus-start logic as nand_subop_get_num_addr_cyc(), using the data
 * length instead of the address cycle count.
 */
2317 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2318 unsigned int instr_idx)
2320 int start_off = 0, end_off;
2322 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2323 !nand_instr_is_data(&subop->instrs[instr_idx])))
2326 start_off = nand_subop_get_data_start_off(subop, instr_idx);
2328 if (instr_idx == subop->ninstrs - 1 &&
2329 subop->last_instr_end_off)
2330 end_off = subop->last_instr_end_off;
2332 end_off = subop->instrs[instr_idx].ctx.data.len;
2334 return end_off - start_off;
2336 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2339 * nand_reset - Reset and initialize a NAND device
2340 * @chip: The NAND chip
2341 * @chipnr: Internal die id
2343 * Save the timings data structure, then apply SDR timings mode 0 (see
2344 * nand_reset_data_interface for details), do the reset operation, and
2345 * apply back the previous timings.
2347 * Returns 0 on success, a negative error code otherwise.
/*
 * Reset die @chipnr: drop to timing mode 0, issue RESET with the CS
 * selected only for the operation itself, then restore and re-apply the
 * saved interface settings (skipped when the chip's default mode is 0).
 */
2349 int nand_reset(struct nand_chip *chip, int chipnr)
2351 struct nand_data_interface saved_data_intf = chip->data_interface;
2354 ret = nand_reset_data_interface(chip, chipnr);
2359 * The CS line has to be released before we can apply the new NAND
2360 * interface settings, hence this weird nand_select_target()
2361 * nand_deselect_target() dance.
2363 nand_select_target(chip, chipnr);
2364 ret = nand_reset_op(chip);
2365 nand_deselect_target(chip);
2370 * A nand_reset_data_interface() put both the NAND chip and the NAND
2371 * controller in timings mode 0. If the default mode for this chip is
2372 * also 0, no need to proceed to the change again. Plus, at probe time,
2373 * nand_setup_data_interface() uses ->set/get_features() which would
2374 * fail anyway as the parameter page is not available yet.
2376 if (!chip->onfi_timing_mode_default)
2379 chip->data_interface = saved_data_intf;
2380 ret = nand_setup_data_interface(chip, chipnr);
2386 EXPORT_SYMBOL_GPL(nand_reset);
2389 * nand_get_features - wrapper to perform a GET_FEATURE
2390 * @chip: NAND chip info structure
2391 * @addr: feature address
2392 * @subfeature_param: the subfeature parameters, a four bytes array
2394 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2395 * operation cannot be handled.
/*
 * GET_FEATURE wrapper: verify the chip supports feature @addr, prefer a
 * vendor/legacy ->get_features() hook, else use the generic op helper.
 */
2397 int nand_get_features(struct nand_chip *chip, int addr,
2398 u8 *subfeature_param)
2400 if (!nand_supports_get_features(chip, addr))
2403 if (chip->legacy.get_features)
2404 return chip->legacy.get_features(chip, addr, subfeature_param);
2406 return nand_get_features_op(chip, addr, subfeature_param);
2410 * nand_set_features - wrapper to perform a SET_FEATURE
2411 * @chip: NAND chip info structure
2412 * @addr: feature address
2413 * @subfeature_param: the subfeature parameters, a four bytes array
2415 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2416 * operation cannot be handled.
/*
 * SET_FEATURE wrapper — mirror of nand_get_features(): support check,
 * legacy hook if present, generic helper otherwise.
 */
2418 int nand_set_features(struct nand_chip *chip, int addr,
2419 u8 *subfeature_param)
2421 if (!nand_supports_set_features(chip, addr))
2424 if (chip->legacy.set_features)
2425 return chip->legacy.set_features(chip, addr, subfeature_param);
2427 return nand_set_features_op(chip, addr, subfeature_param);
2431 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2432 * @buf: buffer to test
2433 * @len: buffer length
2434 * @bitflips_threshold: maximum number of bitflips
2436 * Check if a buffer contains only 0xff, which means the underlying region
2437 * has been erased and is ready to be programmed.
2438 * The bitflips_threshold specify the maximum number of bitflips before
2439 * considering the region is not erased.
2440 * Note: The logic of this function has been extracted from the memweight
2441 * implementation, except that nand_check_erased_buf function exit before
2442 * testing the whole buffer if the number of bitflips exceed the
2443 * bitflips_threshold value.
2445 * Returns a positive number of bitflips less than or equal to
2446 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
/*
 * Count zero bits ("bitflips" relative to the erased 0xff pattern) in
 * @buf, bailing out as soon as the count exceeds @bitflips_threshold.
 * Three phases: byte-wise until long-aligned, long-wise for the bulk,
 * byte-wise for the tail (memweight-style).
 */
2449 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2451 const unsigned char *bitmap = buf;
/* Head: advance byte-by-byte until the pointer is long-aligned. */
2455 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2457 weight = hweight8(*bitmap);
2458 bitflips += BITS_PER_BYTE - weight;
2459 if (unlikely(bitflips > bitflips_threshold))
/* Bulk: one unsigned long at a time. */
2463 for (; len >= sizeof(long);
2464 len -= sizeof(long), bitmap += sizeof(long)) {
2465 unsigned long d = *((unsigned long *)bitmap);
2468 weight = hweight_long(d);
2469 bitflips += BITS_PER_LONG - weight;
2470 if (unlikely(bitflips > bitflips_threshold))
/* Tail: remaining bytes. */
2474 for (; len > 0; len--, bitmap++) {
2475 weight = hweight8(*bitmap);
2476 bitflips += BITS_PER_BYTE - weight;
2477 if (unlikely(bitflips > bitflips_threshold))
2485 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2487 * @data: data buffer to test
2488 * @datalen: data length
2490 * @ecclen: ECC length
2491 * @extraoob: extra OOB buffer
2492 * @extraooblen: extra OOB length
2493 * @bitflips_threshold: maximum number of bitflips
2495 * Check if a data buffer and its associated ECC and OOB data contains only
2496 * 0xff pattern, which means the underlying region has been erased and is
2497 * ready to be programmed.
2498 * The bitflips_threshold specify the maximum number of bitflips before
2499 * considering the region as not erased.
2502 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2503 * different from the NAND page size. When fixing bitflips, ECC engines will
2504 * report the number of errors per chunk, and the NAND core infrastructure
2505 * expect you to return the maximum number of bitflips for the whole page.
2506 * This is why you should always use this function on a single chunk and
2507 * not on the whole page. After checking each chunk you should update your
2508 * max_bitflips value accordingly.
2509 * 2/ When checking for bitflips in erased pages you should not only check
2510 * the payload data but also their associated ECC data, because a user might
2511 * have programmed almost all bits to 1 but a few. In this case, we
2512 * shouldn't consider the chunk as erased, and checking ECC bytes prevent
2514 * 3/ The extraoob argument is optional, and should be used if some of your OOB
2515 * data are protected by the ECC engine.
2516 * It could also be used if you support subpages and want to attach some
2517 * extra OOB data to an ECC chunk.
2519 * Returns a positive number of bitflips less than or equal to
2520 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2521 * threshold. In case of success, the passed buffers are filled with 0xff.
/*
 * Erased-chunk check across data + ECC + extra OOB: each region consumes
 * part of the remaining bitflip budget; a negative return from any stage
 * (threshold exceeded) is propagated. On success the buffers are scrubbed
 * back to all-0xff and the total flip count is returned.
 */
2523 int nand_check_erased_ecc_chunk(void *data, int datalen,
2524 void *ecc, int ecclen,
2525 void *extraoob, int extraooblen,
2526 int bitflips_threshold)
2528 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2530 data_bitflips = nand_check_erased_buf(data, datalen,
2531 bitflips_threshold);
2532 if (data_bitflips < 0)
2533 return data_bitflips;
/* Shrink the budget by what the data region already used. */
2535 bitflips_threshold -= data_bitflips;
2537 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2538 if (ecc_bitflips < 0)
2539 return ecc_bitflips;
2541 bitflips_threshold -= ecc_bitflips;
2543 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2544 bitflips_threshold);
2545 if (extraoob_bitflips < 0)
2546 return extraoob_bitflips;
/* Rewrite the regions to clean 0xff (guard lines missing in extract). */
2549 memset(data, 0xff, datalen);
2552 memset(ecc, 0xff, ecclen);
2554 if (extraoob_bitflips)
2555 memset(extraoob, 0xff, extraooblen);
2557 return data_bitflips + ecc_bitflips + extraoob_bitflips;
2559 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2562 * nand_read_page_raw_notsupp - dummy read raw page function
2563 * @chip: nand chip info structure
2564 * @buf: buffer to store read data
2565 * @oob_required: caller requires OOB data read to chip->oob_poi
2566 * @page: page number to read
2568 * Returns -ENOTSUPP unconditionally.
2570 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2571 int oob_required, int page)
2577 * nand_read_page_raw - [INTERN] read raw page data without ecc
2578 * @chip: nand chip info structure
2579 * @buf: buffer to store read data
2580 * @oob_required: caller requires OOB data read to chip->oob_poi
2581 * @page: page number to read
2583 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2585 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2588 struct mtd_info *mtd = nand_to_mtd(chip);
2591 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2596 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2604 EXPORT_SYMBOL(nand_read_page_raw);
2607 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2608 * @chip: nand chip info structure
2609 * @buf: buffer to store read data
2610 * @oob_required: caller requires OOB data read to chip->oob_poi
2611 * @page: page number to read
2613 * We need a special oob layout and handling even when OOB isn't used.
2615 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2616 int oob_required, int page)
2618 struct mtd_info *mtd = nand_to_mtd(chip);
2619 int eccsize = chip->ecc.size;
2620 int eccbytes = chip->ecc.bytes;
2621 uint8_t *oob = chip->oob_poi;
2622 int steps, size, ret;
2624 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2628 for (steps = chip->ecc.steps; steps > 0; steps--) {
2629 ret = nand_read_data_op(chip, buf, eccsize, false);
2635 if (chip->ecc.prepad) {
2636 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2641 oob += chip->ecc.prepad;
2644 ret = nand_read_data_op(chip, oob, eccbytes, false);
2650 if (chip->ecc.postpad) {
2651 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2656 oob += chip->ecc.postpad;
2660 size = mtd->oobsize - (oob - chip->oob_poi);
2662 ret = nand_read_data_op(chip, oob, size, false);
2671 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2672 * @chip: nand chip info structure
2673 * @buf: buffer to store read data
2674 * @oob_required: caller requires OOB data read to chip->oob_poi
2675 * @page: page number to read
2677 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2678 int oob_required, int page)
2680 struct mtd_info *mtd = nand_to_mtd(chip);
2681 int i, eccsize = chip->ecc.size, ret;
2682 int eccbytes = chip->ecc.bytes;
2683 int eccsteps = chip->ecc.steps;
2685 uint8_t *ecc_calc = chip->ecc.calc_buf;
2686 uint8_t *ecc_code = chip->ecc.code_buf;
2687 unsigned int max_bitflips = 0;
2689 chip->ecc.read_page_raw(chip, buf, 1, page);
2691 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2692 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2694 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2699 eccsteps = chip->ecc.steps;
2702 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2705 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2707 mtd->ecc_stats.failed++;
2709 mtd->ecc_stats.corrected += stat;
2710 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2713 return max_bitflips;
2717 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2718 * @chip: nand chip info structure
2719 * @data_offs: offset of requested data within the page
2720 * @readlen: data length
2721 * @bufpoi: buffer to store read data
2722 * @page: page number to read
2724 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2725 uint32_t readlen, uint8_t *bufpoi, int page)
2727 struct mtd_info *mtd = nand_to_mtd(chip);
2728 int start_step, end_step, num_steps, ret;
2730 int data_col_addr, i, gaps = 0;
2731 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2732 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2733 int index, section = 0;
2734 unsigned int max_bitflips = 0;
2735 struct mtd_oob_region oobregion = { };
2737 /* Column address within the page aligned to ECC size (256bytes) */
2738 start_step = data_offs / chip->ecc.size;
2739 end_step = (data_offs + readlen - 1) / chip->ecc.size;
2740 num_steps = end_step - start_step + 1;
2741 index = start_step * chip->ecc.bytes;
2743 /* Data size aligned to ECC ecc.size */
2744 datafrag_len = num_steps * chip->ecc.size;
2745 eccfrag_len = num_steps * chip->ecc.bytes;
2747 data_col_addr = start_step * chip->ecc.size;
2748 /* If we read not a page aligned data */
2749 p = bufpoi + data_col_addr;
2750 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2755 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2756 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2759 * The performance is faster if we position offsets according to
2760 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
2762 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
2766 if (oobregion.length < eccfrag_len)
2770 ret = nand_change_read_column_op(chip, mtd->writesize,
2771 chip->oob_poi, mtd->oobsize,
2777 * Send the command to read the particular ECC bytes take care
2778 * about buswidth alignment in read_buf.
2780 aligned_pos = oobregion.offset & ~(busw - 1);
2781 aligned_len = eccfrag_len;
2782 if (oobregion.offset & (busw - 1))
2784 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2788 ret = nand_change_read_column_op(chip,
2789 mtd->writesize + aligned_pos,
2790 &chip->oob_poi[aligned_pos],
2791 aligned_len, false);
2796 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
2797 chip->oob_poi, index, eccfrag_len);
2801 p = bufpoi + data_col_addr;
2802 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2805 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
2806 &chip->ecc.calc_buf[i]);
2807 if (stat == -EBADMSG &&
2808 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2809 /* check for empty pages with bitflips */
2810 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2811 &chip->ecc.code_buf[i],
2814 chip->ecc.strength);
2818 mtd->ecc_stats.failed++;
2820 mtd->ecc_stats.corrected += stat;
2821 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2824 return max_bitflips;
2828 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2829 * @chip: nand chip info structure
2830 * @buf: buffer to store read data
2831 * @oob_required: caller requires OOB data read to chip->oob_poi
2832 * @page: page number to read
2834 * Not for syndrome calculating ECC controllers which need a special oob layout.
2836 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
2837 int oob_required, int page)
2839 struct mtd_info *mtd = nand_to_mtd(chip);
2840 int i, eccsize = chip->ecc.size, ret;
2841 int eccbytes = chip->ecc.bytes;
2842 int eccsteps = chip->ecc.steps;
2844 uint8_t *ecc_calc = chip->ecc.calc_buf;
2845 uint8_t *ecc_code = chip->ecc.code_buf;
2846 unsigned int max_bitflips = 0;
2848 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2852 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2853 chip->ecc.hwctl(chip, NAND_ECC_READ);
2855 ret = nand_read_data_op(chip, p, eccsize, false);
2859 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2862 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
2866 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2871 eccsteps = chip->ecc.steps;
2874 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2877 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2878 if (stat == -EBADMSG &&
2879 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2880 /* check for empty pages with bitflips */
2881 stat = nand_check_erased_ecc_chunk(p, eccsize,
2882 &ecc_code[i], eccbytes,
2884 chip->ecc.strength);
2888 mtd->ecc_stats.failed++;
2890 mtd->ecc_stats.corrected += stat;
2891 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2894 return max_bitflips;
2898 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
2899 * @chip: nand chip info structure
2900 * @buf: buffer to store read data
2901 * @oob_required: caller requires OOB data read to chip->oob_poi
2902 * @page: page number to read
2904 * Hardware ECC for large page chips, require OOB to be read first. For this
2905 * ECC mode, the write_page method is re-used from ECC_HW. These methods
2906 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
2907 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
2908 * the data area, by overwriting the NAND manufacturer bad block markings.
2910 static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
2911 int oob_required, int page)
2913 struct mtd_info *mtd = nand_to_mtd(chip);
2914 int i, eccsize = chip->ecc.size, ret;
2915 int eccbytes = chip->ecc.bytes;
2916 int eccsteps = chip->ecc.steps;
2918 uint8_t *ecc_code = chip->ecc.code_buf;
2919 uint8_t *ecc_calc = chip->ecc.calc_buf;
2920 unsigned int max_bitflips = 0;
2922 /* Read the OOB area first */
2923 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2927 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2931 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2936 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2939 chip->ecc.hwctl(chip, NAND_ECC_READ);
2941 ret = nand_read_data_op(chip, p, eccsize, false);
2945 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2947 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
2948 if (stat == -EBADMSG &&
2949 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2950 /* check for empty pages with bitflips */
2951 stat = nand_check_erased_ecc_chunk(p, eccsize,
2952 &ecc_code[i], eccbytes,
2954 chip->ecc.strength);
2958 mtd->ecc_stats.failed++;
2960 mtd->ecc_stats.corrected += stat;
2961 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2964 return max_bitflips;
2968 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2969 * @chip: nand chip info structure
2970 * @buf: buffer to store read data
2971 * @oob_required: caller requires OOB data read to chip->oob_poi
2972 * @page: page number to read
2974 * The hw generator calculates the error syndrome automatically. Therefore we
2975 * need a special oob layout and handling.
2977 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
2978 int oob_required, int page)
2980 struct mtd_info *mtd = nand_to_mtd(chip);
2981 int ret, i, eccsize = chip->ecc.size;
2982 int eccbytes = chip->ecc.bytes;
2983 int eccsteps = chip->ecc.steps;
2984 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
2986 uint8_t *oob = chip->oob_poi;
2987 unsigned int max_bitflips = 0;
2989 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2993 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2996 chip->ecc.hwctl(chip, NAND_ECC_READ);
2998 ret = nand_read_data_op(chip, p, eccsize, false);
3002 if (chip->ecc.prepad) {
3003 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3008 oob += chip->ecc.prepad;
3011 chip->ecc.hwctl(chip, NAND_ECC_READSYN);
3013 ret = nand_read_data_op(chip, oob, eccbytes, false);
3017 stat = chip->ecc.correct(chip, p, oob, NULL);
3021 if (chip->ecc.postpad) {
3022 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3027 oob += chip->ecc.postpad;
3030 if (stat == -EBADMSG &&
3031 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3032 /* check for empty pages with bitflips */
3033 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3037 chip->ecc.strength);
3041 mtd->ecc_stats.failed++;
3043 mtd->ecc_stats.corrected += stat;
3044 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3048 /* Calculate remaining oob bytes */
3049 i = mtd->oobsize - (oob - chip->oob_poi);
3051 ret = nand_read_data_op(chip, oob, i, false);
3056 return max_bitflips;
3060 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3061 * @chip: NAND chip object
3062 * @oob: oob destination address
3063 * @ops: oob ops structure
3064 * @len: size of oob to transfer
3066 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3067 struct mtd_oob_ops *ops, size_t len)
3069 struct mtd_info *mtd = nand_to_mtd(chip);
3072 switch (ops->mode) {
3074 case MTD_OPS_PLACE_OOB:
3076 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3079 case MTD_OPS_AUTO_OOB:
3080 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3092 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3093 * @chip: NAND chip object
3094 * @retry_mode: the retry mode to use
3096 * Some vendors supply a special command to shift the Vt threshold, to be used
3097 * when there are too many bitflips in a page (i.e., ECC error). After setting
3098 * a new threshold, the host should retry reading the page.
3100 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3102 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3104 if (retry_mode >= chip->read_retries)
3107 if (!chip->setup_read_retry)
3110 return chip->setup_read_retry(chip, retry_mode);
3113 static void nand_wait_readrdy(struct nand_chip *chip)
3115 const struct nand_sdr_timings *sdr;
3117 if (!(chip->options & NAND_NEED_READRDY))
3120 sdr = nand_get_sdr_timings(&chip->data_interface);
3121 WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3125 * nand_do_read_ops - [INTERN] Read data with ECC
3126 * @chip: NAND chip object
3127 * @from: offset to read from
3128 * @ops: oob ops structure
3130 * Internal function. Called with chip held.
3132 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3133 struct mtd_oob_ops *ops)
3135 int chipnr, page, realpage, col, bytes, aligned, oob_required;
3136 struct mtd_info *mtd = nand_to_mtd(chip);
3138 uint32_t readlen = ops->len;
3139 uint32_t oobreadlen = ops->ooblen;
3140 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3142 uint8_t *bufpoi, *oob, *buf;
3144 unsigned int max_bitflips = 0;
3146 bool ecc_fail = false;
3148 chipnr = (int)(from >> chip->chip_shift);
3149 nand_select_target(chip, chipnr);
3151 realpage = (int)(from >> chip->page_shift);
3152 page = realpage & chip->pagemask;
3154 col = (int)(from & (mtd->writesize - 1));
3158 oob_required = oob ? 1 : 0;
3161 unsigned int ecc_failures = mtd->ecc_stats.failed;
3163 bytes = min(mtd->writesize - col, readlen);
3164 aligned = (bytes == mtd->writesize);
3168 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3169 use_bufpoi = !virt_addr_valid(buf) ||
3170 !IS_ALIGNED((unsigned long)buf,
3175 /* Is the current page in the buffer? */
3176 if (realpage != chip->pagebuf || oob) {
3177 bufpoi = use_bufpoi ? chip->data_buf : buf;
3179 if (use_bufpoi && aligned)
3180 pr_debug("%s: using read bounce buffer for buf@%p\n",
3185 * Now read the page into the buffer. Absent an error,
3186 * the read methods return max bitflips per ecc step.
3188 if (unlikely(ops->mode == MTD_OPS_RAW))
3189 ret = chip->ecc.read_page_raw(chip, bufpoi,
3192 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3194 ret = chip->ecc.read_subpage(chip, col, bytes,
3197 ret = chip->ecc.read_page(chip, bufpoi,
3198 oob_required, page);
3201 /* Invalidate page cache */
3206 /* Transfer not aligned data */
3208 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3209 !(mtd->ecc_stats.failed - ecc_failures) &&
3210 (ops->mode != MTD_OPS_RAW)) {
3211 chip->pagebuf = realpage;
3212 chip->pagebuf_bitflips = ret;
3214 /* Invalidate page cache */
3217 memcpy(buf, chip->data_buf + col, bytes);
3220 if (unlikely(oob)) {
3221 int toread = min(oobreadlen, max_oobsize);
3224 oob = nand_transfer_oob(chip, oob, ops,
3226 oobreadlen -= toread;
3230 nand_wait_readrdy(chip);
3232 if (mtd->ecc_stats.failed - ecc_failures) {
3233 if (retry_mode + 1 < chip->read_retries) {
3235 ret = nand_setup_read_retry(chip,
3240 /* Reset failures; retry */
3241 mtd->ecc_stats.failed = ecc_failures;
3244 /* No more retry modes; real failure */
3250 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3252 memcpy(buf, chip->data_buf + col, bytes);
3254 max_bitflips = max_t(unsigned int, max_bitflips,
3255 chip->pagebuf_bitflips);
3260 /* Reset to retry mode 0 */
3262 ret = nand_setup_read_retry(chip, 0);
3271 /* For subsequent reads align to page boundary */
3273 /* Increment page address */
3276 page = realpage & chip->pagemask;
3277 /* Check, if we cross a chip boundary */
3280 nand_deselect_target(chip);
3281 nand_select_target(chip, chipnr);
3284 nand_deselect_target(chip);
3286 ops->retlen = ops->len - (size_t) readlen;
3288 ops->oobretlen = ops->ooblen - oobreadlen;
3296 return max_bitflips;
3300 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3301 * @chip: nand chip info structure
3302 * @page: page number to read
3304 int nand_read_oob_std(struct nand_chip *chip, int page)
3306 struct mtd_info *mtd = nand_to_mtd(chip);
3308 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3310 EXPORT_SYMBOL(nand_read_oob_std);
3313 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3315 * @chip: nand chip info structure
3316 * @page: page number to read
3318 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3320 struct mtd_info *mtd = nand_to_mtd(chip);
3321 int length = mtd->oobsize;
3322 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3323 int eccsize = chip->ecc.size;
3324 uint8_t *bufpoi = chip->oob_poi;
3325 int i, toread, sndrnd = 0, pos, ret;
3327 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3331 for (i = 0; i < chip->ecc.steps; i++) {
3335 pos = eccsize + i * (eccsize + chunk);
3336 if (mtd->writesize > 512)
3337 ret = nand_change_read_column_op(chip, pos,
3341 ret = nand_read_page_op(chip, page, pos, NULL,
3348 toread = min_t(int, length, chunk);
3350 ret = nand_read_data_op(chip, bufpoi, toread, false);
3358 ret = nand_read_data_op(chip, bufpoi, length, false);
3367 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3368 * @chip: nand chip info structure
3369 * @page: page number to write
3371 int nand_write_oob_std(struct nand_chip *chip, int page)
3373 struct mtd_info *mtd = nand_to_mtd(chip);
3375 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3378 EXPORT_SYMBOL(nand_write_oob_std);
3381 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3382 * with syndrome - only for large page flash
3383 * @chip: nand chip info structure
3384 * @page: page number to write
3386 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3388 struct mtd_info *mtd = nand_to_mtd(chip);
3389 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3390 int eccsize = chip->ecc.size, length = mtd->oobsize;
3391 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3392 const uint8_t *bufpoi = chip->oob_poi;
3395 * data-ecc-data-ecc ... ecc-oob
3397 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3399 if (!chip->ecc.prepad && !chip->ecc.postpad) {
3400 pos = steps * (eccsize + chunk);
3405 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3409 for (i = 0; i < steps; i++) {
3411 if (mtd->writesize <= 512) {
3412 uint32_t fill = 0xFFFFFFFF;
3416 int num = min_t(int, len, 4);
3418 ret = nand_write_data_op(chip, &fill,
3426 pos = eccsize + i * (eccsize + chunk);
3427 ret = nand_change_write_column_op(chip, pos,
3435 len = min_t(int, length, chunk);
3437 ret = nand_write_data_op(chip, bufpoi, len, false);
3445 ret = nand_write_data_op(chip, bufpoi, length, false);
3450 return nand_prog_page_end_op(chip);
3454 * nand_do_read_oob - [INTERN] NAND read out-of-band
3455 * @chip: NAND chip object
3456 * @from: offset to read from
3457 * @ops: oob operations description structure
3459 * NAND read out-of-band data from the spare area.
3461 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3462 struct mtd_oob_ops *ops)
3464 struct mtd_info *mtd = nand_to_mtd(chip);
3465 unsigned int max_bitflips = 0;
3466 int page, realpage, chipnr;
3467 struct mtd_ecc_stats stats;
3468 int readlen = ops->ooblen;
3470 uint8_t *buf = ops->oobbuf;
3473 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3474 __func__, (unsigned long long)from, readlen);
3476 stats = mtd->ecc_stats;
3478 len = mtd_oobavail(mtd, ops);
3480 chipnr = (int)(from >> chip->chip_shift);
3481 nand_select_target(chip, chipnr);
3483 /* Shift to get page */
3484 realpage = (int)(from >> chip->page_shift);
3485 page = realpage & chip->pagemask;
3488 if (ops->mode == MTD_OPS_RAW)
3489 ret = chip->ecc.read_oob_raw(chip, page);
3491 ret = chip->ecc.read_oob(chip, page);
3496 len = min(len, readlen);
3497 buf = nand_transfer_oob(chip, buf, ops, len);
3499 nand_wait_readrdy(chip);
3501 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3507 /* Increment page address */
3510 page = realpage & chip->pagemask;
3511 /* Check, if we cross a chip boundary */
3514 nand_deselect_target(chip);
3515 nand_select_target(chip, chipnr);
3518 nand_deselect_target(chip);
3520 ops->oobretlen = ops->ooblen - readlen;
3525 if (mtd->ecc_stats.failed - stats.failed)
3528 return max_bitflips;
3532 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3533 * @mtd: MTD device structure
3534 * @from: offset to read from
3535 * @ops: oob operation description structure
3537 * NAND read data and/or out-of-band data.
3539 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3540 struct mtd_oob_ops *ops)
3542 struct nand_chip *chip = mtd_to_nand(mtd);
3547 if (ops->mode != MTD_OPS_PLACE_OOB &&
3548 ops->mode != MTD_OPS_AUTO_OOB &&
3549 ops->mode != MTD_OPS_RAW)
3552 ret = nand_get_device(chip);
3557 ret = nand_do_read_oob(chip, from, ops);
3559 ret = nand_do_read_ops(chip, from, ops);
3561 nand_release_device(chip);
3566 * nand_write_page_raw_notsupp - dummy raw page write function
3567 * @chip: nand chip info structure
3569 * @oob_required: must write chip->oob_poi to OOB
3570 * @page: page number to write
3572 * Returns -ENOTSUPP unconditionally.
3574 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3575 int oob_required, int page)
3581 * nand_write_page_raw - [INTERN] raw page write function
3582 * @chip: nand chip info structure
3584 * @oob_required: must write chip->oob_poi to OOB
3585 * @page: page number to write
3587 * Not for syndrome calculating ECC controllers, which use a special oob layout.
3589 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3590 int oob_required, int page)
3592 struct mtd_info *mtd = nand_to_mtd(chip);
3595 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3600 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3606 return nand_prog_page_end_op(chip);
3608 EXPORT_SYMBOL(nand_write_page_raw);
3611 * nand_write_page_raw_syndrome - [INTERN] raw page write function
3612 * @chip: nand chip info structure
3614 * @oob_required: must write chip->oob_poi to OOB
3615 * @page: page number to write
3617 * We need a special oob layout and handling even when ECC isn't checked.
3619 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3620 const uint8_t *buf, int oob_required,
3623 struct mtd_info *mtd = nand_to_mtd(chip);
3624 int eccsize = chip->ecc.size;
3625 int eccbytes = chip->ecc.bytes;
3626 uint8_t *oob = chip->oob_poi;
3627 int steps, size, ret;
3629 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3633 for (steps = chip->ecc.steps; steps > 0; steps--) {
3634 ret = nand_write_data_op(chip, buf, eccsize, false);
3640 if (chip->ecc.prepad) {
3641 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3646 oob += chip->ecc.prepad;
3649 ret = nand_write_data_op(chip, oob, eccbytes, false);
3655 if (chip->ecc.postpad) {
3656 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3661 oob += chip->ecc.postpad;
3665 size = mtd->oobsize - (oob - chip->oob_poi);
3667 ret = nand_write_data_op(chip, oob, size, false);
3672 return nand_prog_page_end_op(chip);
3675 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3676 * @chip: nand chip info structure
3678 * @oob_required: must write chip->oob_poi to OOB
3679 * @page: page number to write
3681 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3682 int oob_required, int page)
3684 struct mtd_info *mtd = nand_to_mtd(chip);
3685 int i, eccsize = chip->ecc.size, ret;
3686 int eccbytes = chip->ecc.bytes;
3687 int eccsteps = chip->ecc.steps;
3688 uint8_t *ecc_calc = chip->ecc.calc_buf;
3689 const uint8_t *p = buf;
3691 /* Software ECC calculation */
3692 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3693 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3695 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3700 return chip->ecc.write_page_raw(chip, buf, 1, page);
3704 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3705 * @chip: nand chip info structure
3707 * @oob_required: must write chip->oob_poi to OOB
3708 * @page: page number to write
3710 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3711 int oob_required, int page)
3713 struct mtd_info *mtd = nand_to_mtd(chip);
3714 int i, eccsize = chip->ecc.size, ret;
3715 int eccbytes = chip->ecc.bytes;
3716 int eccsteps = chip->ecc.steps;
3717 uint8_t *ecc_calc = chip->ecc.calc_buf;
3718 const uint8_t *p = buf;
3720 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3724 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3725 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3727 ret = nand_write_data_op(chip, p, eccsize, false);
3731 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3734 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3739 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3743 return nand_prog_page_end_op(chip);
3748 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3749 * @chip: nand chip info structure
3750 * @offset: column address of subpage within the page
3751 * @data_len: data length
3753 * @oob_required: must write chip->oob_poi to OOB
3754 * @page: page number to write
3756 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
3757 uint32_t data_len, const uint8_t *buf,
3758 int oob_required, int page)
3760 struct mtd_info *mtd = nand_to_mtd(chip);
3761 uint8_t *oob_buf = chip->oob_poi;
3762 uint8_t *ecc_calc = chip->ecc.calc_buf;
3763 int ecc_size = chip->ecc.size;
3764 int ecc_bytes = chip->ecc.bytes;
3765 int ecc_steps = chip->ecc.steps;
3766 uint32_t start_step = offset / ecc_size;
3767 uint32_t end_step = (offset + data_len - 1) / ecc_size;
3768 int oob_bytes = mtd->oobsize / ecc_steps;
3771 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3775 for (step = 0; step < ecc_steps; step++) {
3776 /* configure controller for WRITE access */
3777 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3779 /* write data (untouched subpages already masked by 0xFF) */
3780 ret = nand_write_data_op(chip, buf, ecc_size, false);
3784 /* mask ECC of un-touched subpages by padding 0xFF */
3785 if ((step < start_step) || (step > end_step))
3786 memset(ecc_calc, 0xff, ecc_bytes);
3788 chip->ecc.calculate(chip, buf, ecc_calc);
3790 /* mask OOB of un-touched subpages by padding 0xFF */
3791 /* if oob_required, preserve OOB metadata of written subpage */
3792 if (!oob_required || (step < start_step) || (step > end_step))
3793 memset(oob_buf, 0xff, oob_bytes);
3796 ecc_calc += ecc_bytes;
3797 oob_buf += oob_bytes;
3800 /* copy calculated ECC for whole page to chip->buffer->oob */
3801 /* this include masked-value(0xFF) for unwritten subpages */
3802 ecc_calc = chip->ecc.calc_buf;
3803 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3808 /* write OOB buffer to NAND device */
3809 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3813 return nand_prog_page_end_op(chip);
3818 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3819 * @chip: nand chip info structure
3821 * @oob_required: must write chip->oob_poi to OOB
3822 * @page: page number to write
3824 * The hw generator calculates the error syndrome automatically. Therefore we
3825 * need a special oob layout and handling.
3827 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
3828 int oob_required, int page)
3830 struct mtd_info *mtd = nand_to_mtd(chip);
3831 int i, eccsize = chip->ecc.size;
3832 int eccbytes = chip->ecc.bytes;
3833 int eccsteps = chip->ecc.steps;
3834 const uint8_t *p = buf;
3835 uint8_t *oob = chip->oob_poi;
3838 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3842 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3843 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3845 ret = nand_write_data_op(chip, p, eccsize, false);
3849 if (chip->ecc.prepad) {
3850 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3855 oob += chip->ecc.prepad;
3858 chip->ecc.calculate(chip, p, oob);
3860 ret = nand_write_data_op(chip, oob, eccbytes, false);
3866 if (chip->ecc.postpad) {
3867 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3872 oob += chip->ecc.postpad;
3876 /* Calculate remaining oob bytes */
3877 i = mtd->oobsize - (oob - chip->oob_poi);
3879 ret = nand_write_data_op(chip, oob, i, false);
3884 return nand_prog_page_end_op(chip);
3888 * nand_write_page - write one page
3889 * @chip: NAND chip descriptor
3890 * @offset: address offset within the page
3891 * @data_len: length of actual data to be written
3892 * @buf: the data to write
3893 * @oob_required: must write chip->oob_poi to OOB
3894 * @page: page number to write
3895 * @raw: use _raw version of write_page
3897 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3898 int data_len, const uint8_t *buf, int oob_required,
3901 struct mtd_info *mtd = nand_to_mtd(chip);
3902 int status, subpage;
3904 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3905 chip->ecc.write_subpage)
3906 subpage = offset || (data_len < mtd->writesize);
3911 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3914 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3915 oob_required, page);
3917 status = chip->ecc.write_page(chip, buf, oob_required, page);
3925 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
3928 * nand_do_write_ops - [INTERN] NAND write with ECC
3929 * @chip: NAND chip object
3930 * @to: offset to write to
3931 * @ops: oob operations description structure
3933 * NAND write with ECC.
3935 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
3936 struct mtd_oob_ops *ops)
/* NOTE(review): several original lines are elided in this excerpt (visible
 * from the gaps in the embedded numbering), including the main write loop
 * header and parts of the error paths. */
3938 struct mtd_info *mtd = nand_to_mtd(chip);
3939 int chipnr, realpage, page, column;
3940 uint32_t writelen = ops->len;
3942 uint32_t oobwritelen = ops->ooblen;
3943 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3945 uint8_t *oob = ops->oobbuf;
3946 uint8_t *buf = ops->datbuf;
3948 int oob_required = oob ? 1 : 0;
3954 /* Reject writes, which are not page aligned */
3955 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
3956 pr_notice("%s: attempt to write non page aligned data\n",
/* Column = byte offset of the write start within its page. */
3961 column = to & (mtd->writesize - 1);
3963 chipnr = (int)(to >> chip->chip_shift);
3964 nand_select_target(chip, chipnr);
3966 /* Check, if it is write protected */
3967 if (nand_check_wp(chip)) {
3972 realpage = (int)(to >> chip->page_shift);
3973 page = realpage & chip->pagemask;
3975 /* Invalidate the page cache, when we write to the cached page */
3976 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
3977 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
3980 /* Don't allow multipage oob writes with offset */
3981 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
3987 int bytes = mtd->writesize;
3988 uint8_t *wbuf = buf;
3990 int part_pagewr = (column || writelen < mtd->writesize);
/* Use the internal bounce buffer when the caller's buffer cannot be
 * DMA'd directly (not virt-addr valid or misaligned). */
3994 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3995 use_bufpoi = !virt_addr_valid(buf) ||
3996 !IS_ALIGNED((unsigned long)buf,
4001 /* Partial page write?, or need to use bounce buffer */
4003 pr_debug("%s: using write bounce buffer for buf@%p\n",
4006 bytes = min_t(int, bytes - column, writelen);
/* Pre-fill with 0xff so untouched bytes stay erased. */
4008 memset(chip->data_buf, 0xff, mtd->writesize);
4009 memcpy(&chip->data_buf[column], buf, bytes);
4010 wbuf = chip->data_buf;
4013 if (unlikely(oob)) {
4014 size_t len = min(oobwritelen, oobmaxlen);
4015 oob = nand_fill_oob(chip, oob, len, ops);
4018 /* We still need to erase leftover OOB data */
4019 memset(chip->oob_poi, 0xff, mtd->oobsize);
4022 ret = nand_write_page(chip, column, bytes, wbuf,
4024 (ops->mode == MTD_OPS_RAW));
4036 page = realpage & chip->pagemask;
4037 /* Check, if we cross a chip boundary */
4040 nand_deselect_target(chip);
4041 nand_select_target(chip, chipnr);
/* Report how much was actually written before any failure. */
4045 ops->retlen = ops->len - writelen;
4047 ops->oobretlen = ops->ooblen;
4050 nand_deselect_target(chip);
4055 * panic_nand_write - [MTD Interface] NAND write with ECC
4056 * @mtd: MTD device structure
4057 * @to: offset to write to
4058 * @len: number of bytes to write
4059 * @retlen: pointer to variable to store the number of written bytes
4060 * @buf: the data to write
4062 * NAND write with ECC. Used when performing writes in interrupt context, this
4063 * may for example be called by mtdoops when writing an oops while in panic.
4065 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4066 size_t *retlen, const uint8_t *buf)
4068 struct nand_chip *chip = mtd_to_nand(mtd);
4069 int chipnr = (int)(to >> chip->chip_shift);
4070 struct mtd_oob_ops ops;
/* No nand_get_device() here: we are in panic context, so we cannot sleep
 * on the device lock; we only busy-wait for readiness below. */
4073 nand_select_target(chip, chipnr);
4075 /* Wait for the device to get ready */
4076 panic_nand_wait(chip, 400);
4078 memset(&ops, 0, sizeof(ops));
/* Cast away const: mtd_oob_ops carries a non-const data pointer. */
4080 ops.datbuf = (uint8_t *)buf;
4081 ops.mode = MTD_OPS_PLACE_OOB;
/* NOTE(review): the line setting ops.len from @len is elided in this
 * excerpt — presumably present in the original; verify against source. */
4083 ret = nand_do_write_ops(chip, to, &ops);
4085 *retlen = ops.retlen;
4090 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4091 * @mtd: MTD device structure
4092 * @to: offset to write to
4093 * @ops: oob operation description structure
4095 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4096 struct mtd_oob_ops *ops)
4098 struct nand_chip *chip = mtd_to_nand(mtd);
/* Default: unsupported mode unless a switch case below overrides it. */
4099 int ret = -ENOTSUPP;
/* Serialize access to the chip for the whole operation. */
4103 ret = nand_get_device(chip);
4107 switch (ops->mode) {
4108 case MTD_OPS_PLACE_OOB:
4109 case MTD_OPS_AUTO_OOB:
/* OOB-only writes go through the dedicated OOB path; writes with a
 * data buffer use the full page-write path. */
4118 ret = nand_do_write_oob(chip, to, ops);
4120 ret = nand_do_write_ops(chip, to, ops);
4123 nand_release_device(chip);
4128 * nand_erase - [MTD Interface] erase block(s)
4129 * @mtd: MTD device structure
4130 * @instr: erase instruction
4132 * Erase one or more blocks.
4134 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Thin wrapper: allowbbt=0 forbids erasing the bad block table area. */
4136 return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4140 * nand_erase_nand - [INTERN] erase block(s)
4141 * @chip: NAND chip object
4142 * @instr: erase instruction
4143 * @allowbbt: allow erasing the bbt area
4145 * Erase one or more blocks.
4147 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4150 int page, pages_per_block, ret, chipnr;
4153 pr_debug("%s: start = 0x%012llx, len = %llu\n",
4154 __func__, (unsigned long long)instr->addr,
4155 (unsigned long long)instr->len);
/* Reject requests not aligned to erase-block boundaries. */
4157 if (check_offs_len(chip, instr->addr, instr->len))
4160 /* Grab the lock and see if the device is available */
4161 ret = nand_get_device(chip);
4165 /* Shift to get first page */
4166 page = (int)(instr->addr >> chip->page_shift);
4167 chipnr = (int)(instr->addr >> chip->chip_shift);
4169 /* Calculate pages in each block */
4170 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4172 /* Select the NAND device */
4173 nand_select_target(chip, chipnr);
4175 /* Check, if it is write protected */
4176 if (nand_check_wp(chip)) {
4177 pr_debug("%s: device is write protected!\n",
4183 /* Loop through the pages */
4187 /* Check if we have a bad block, we do not erase bad blocks! */
4188 if (nand_block_checkbad(chip, ((loff_t) page) <<
4189 chip->page_shift, allowbbt)) {
4190 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4197 * Invalidate the page cache, if we erase the block which
4198 * contains the current cached page.
4200 if (page <= chip->pagebuf && chip->pagebuf <
4201 (page + pages_per_block))
/* Convert the page number into a per-chip block number for the op. */
4204 ret = nand_erase_op(chip, (page & chip->pagemask) >>
4205 (chip->phys_erase_shift - chip->page_shift));
4207 pr_debug("%s: failed erase, page 0x%08x\n",
4210 ((loff_t)page << chip->page_shift);
4214 /* Increment page address and decrement length */
4215 len -= (1ULL << chip->phys_erase_shift);
4216 page += pages_per_block;
4218 /* Check, if we cross a chip boundary */
4219 if (len && !(page & chip->pagemask)) {
4221 nand_deselect_target(chip);
4222 nand_select_target(chip, chipnr);
4229 /* Deselect and wake up anyone waiting on the device */
4230 nand_deselect_target(chip);
4231 nand_release_device(chip);
4233 /* Return more or less happy */
4238 * nand_sync - [MTD Interface] sync
4239 * @mtd: MTD device structure
4241 * Sync is actually a wait for chip ready function.
4243 static void nand_sync(struct mtd_info *mtd)
4245 struct nand_chip *chip = mtd_to_nand(mtd);
4247 pr_debug("%s: called\n", __func__);
4249 /* Grab the lock and see if the device is available */
/* Acquiring the device lock is itself the "wait until idle" step. */
4250 WARN_ON(nand_get_device(chip));
4251 /* Release it and go back */
4252 nand_release_device(chip);
4256 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4257 * @mtd: MTD device structure
4258 * @offs: offset relative to mtd start
4260 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4262 struct nand_chip *chip = mtd_to_nand(mtd);
4263 int chipnr = (int)(offs >> chip->chip_shift);
4266 /* Select the NAND device */
4267 ret = nand_get_device(chip);
4271 nand_select_target(chip, chipnr);
/* allowbbt=0: the BBT area itself is never reported through this path. */
4273 ret = nand_block_checkbad(chip, offs, 0);
4275 nand_deselect_target(chip);
4276 nand_release_device(chip);
4282 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4283 * @mtd: MTD device structure
4284 * @ofs: offset relative to mtd start
4286 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* First check current state so an already-bad block is not re-marked. */
4290 ret = nand_block_isbad(mtd, ofs);
4292 /* If it was bad already, return success and do nothing */
4298 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4302 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
4303 * @mtd: MTD device structure
4304 * @ofs: offset relative to mtd start
4305 * @len: length of mtd
4307 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4309 struct nand_chip *chip = mtd_to_nand(mtd);
4310 u32 part_start_block;
4316 * max_bb_per_die and blocks_per_die used to determine
4317 * the maximum bad block count.
4319 if (!chip->max_bb_per_die || !chip->blocks_per_die)
4322 /* Get the start and end of the partition in erase blocks. */
4323 part_start_block = mtd_div_by_eb(ofs, mtd);
4324 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4326 /* Get the start and end LUNs of the partition. */
4327 part_start_die = part_start_block / chip->blocks_per_die;
4328 part_end_die = part_end_block / chip->blocks_per_die;
4331 * Look up the bad blocks per unit and multiply by the number of units
4332 * that the partition spans.
4334 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
4338 * nand_suspend - [MTD Interface] Suspend the NAND flash
4339 * @mtd: MTD device structure
4341 static int nand_suspend(struct mtd_info *mtd)
4343 struct nand_chip *chip = mtd_to_nand(mtd);
/* Flag the chip suspended under chip->lock; other paths can then refuse
 * to start new operations. */
4345 mutex_lock(&chip->lock);
4346 chip->suspended = 1;
4347 mutex_unlock(&chip->lock);
4353 * nand_resume - [MTD Interface] Resume the NAND flash
4354 * @mtd: MTD device structure
4356 static void nand_resume(struct mtd_info *mtd)
4358 struct nand_chip *chip = mtd_to_nand(mtd);
4360 mutex_lock(&chip->lock);
/* Clear the suspended flag; complain if resume is called while the chip
 * was never suspended. */
4361 if (chip->suspended)
4362 chip->suspended = 0;
4364 pr_err("%s called for a chip which is not in suspended state\n",
4366 mutex_unlock(&chip->lock);
4370 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4371 * prevent further operations
4372 * @mtd: MTD device structure
4374 static void nand_shutdown(struct mtd_info *mtd)
/* NOTE(review): body elided in this excerpt; the visible contract is only
 * what the kernel-doc above states. */
4379 /* Set default functions */
4380 static void nand_set_defaults(struct nand_chip *chip)
4382 /* If no controller is provided, use the dummy, legacy one. */
4383 if (!chip->controller) {
4384 chip->controller = &chip->legacy.dummy_controller;
4385 nand_controller_init(chip->controller);
/* Install legacy default hooks for drivers that did not set their own. */
4388 nand_legacy_set_defaults(chip);
/* Default buffer alignment is 1 byte unless the driver asked for more. */
4390 if (!chip->buf_align)
4391 chip->buf_align = 1;
4394 /* Sanitize ONFI strings so we can safely print them */
4395 void sanitize_string(uint8_t *s, size_t len)
4399 /* Null terminate */
4402 /* Remove non printable chars */
4403 for (i = 0; i < len - 1; i++) {
/* Replace anything outside printable ASCII (below ' ' or above 127). */
4404 if (s[i] < ' ' || s[i] > 127)
4408 /* Remove trailing spaces */
4413 * nand_id_has_period - Check if an ID string has a given wraparound period
4414 * @id_data: the ID string
4415 * @arrlen: the length of the @id_data array
4416 * @period: the period of repetition
4418 * Check if an ID string is repeated within a given sequence of bytes at
4419 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4420 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4421 * if the repetition has a period of @period; otherwise, returns zero.
4423 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
/* Every byte must equal the byte @period positions earlier. */
4426 for (i = 0; i < period; i++)
4427 for (j = i + period; j < arrlen; j += period)
4428 if (id_data[i] != id_data[j])
4434 * nand_id_len - Get the length of an ID string returned by CMD_READID
4435 * @id_data: the ID string
4436 * @arrlen: the length of the @id_data array
4438 * Returns the length of the ID string, according to known wraparound/trailing
4439 * zero patterns. If no pattern exists, returns the length of the array.
4441 static int nand_id_len(u8 *id_data, int arrlen)
4443 int last_nonzero, period;
4445 /* Find last non-zero byte */
4446 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4447 if (id_data[last_nonzero])
/* All bytes zero: treat as an empty/absent ID string. */
4451 if (last_nonzero < 0)
4454 /* Calculate wraparound period */
4455 for (period = 1; period < arrlen; period++)
4456 if (nand_id_has_period(id_data, arrlen, period))
4459 /* There's a repeated pattern */
4460 if (period < arrlen)
4463 /* There are trailing zeros */
4464 if (last_nonzero < arrlen - 1)
4465 return last_nonzero + 1;
4467 /* No pattern detected */
4471 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4472 static int nand_get_bits_per_cell(u8 cellinfo)
/* Isolate the cell-type field, then shift it down to a small integer. */
4476 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4477 bits >>= NAND_CI_CELLTYPE_SHIFT;
4482 * Many new NAND share similar device ID codes, which represent the size of the
4483 * chip. The rest of the parameters must be decoded according to generic or
4484 * manufacturer-specific "extended ID" decoding patterns.
4486 void nand_decode_ext_id(struct nand_chip *chip)
4488 struct mtd_info *mtd = nand_to_mtd(chip);
4490 u8 *id_data = chip->id.data;
4491 /* The 3rd id byte holds MLC / multichip data */
4492 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4493 /* The 4th id byte is the important one */
/* Page size: 1KiB << (extid low bits). */
4497 mtd->writesize = 1024 << (extid & 0x03);
/* OOB size scales with the page size (per 512-byte unit). */
4500 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4502 /* Calc blocksize. Blocksize is multiples of 64KiB */
4503 mtd->erasesize = (64 * 1024) << (extid & 0x03);
4505 /* Get buswidth information */
4507 chip->options |= NAND_BUSWIDTH_16;
4509 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4512 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4513 * decodes a matching ID table entry and assigns the MTD size parameters for
4516 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4518 struct mtd_info *mtd = nand_to_mtd(chip);
4520 mtd->erasesize = type->erasesize;
4521 mtd->writesize = type->pagesize;
/* Legacy parts expose 1 OOB byte per 32 bytes of page data. */
4522 mtd->oobsize = mtd->writesize / 32;
4524 /* All legacy ID NAND are small-page, SLC */
4525 chip->bits_per_cell = 1;
4529 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4530 * heuristic patterns using various detected parameters (e.g., manufacturer,
4531 * page size, cell-type information).
4533 static void nand_decode_bbm_options(struct nand_chip *chip)
4535 struct mtd_info *mtd = nand_to_mtd(chip);
4537 /* Set the bad block position */
/* Large-page or 16-bit chips use the large-page marker position. */
4538 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4539 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
4541 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
/* A "full-id" table entry carries the complete ID string (id_len != 0)
 * rather than just a device ID byte. */
4544 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4546 return type->id_len;
/* Match the chip's full ID string against a table entry; on success, copy
 * all geometry/ECC parameters from the entry into chip/mtd. */
4549 static bool find_full_id_nand(struct nand_chip *chip,
4550 struct nand_flash_dev *type)
4552 struct mtd_info *mtd = nand_to_mtd(chip);
4553 u8 *id_data = chip->id.data;
4555 if (!strncmp(type->id, id_data, type->id_len)) {
4556 mtd->writesize = type->pagesize;
4557 mtd->erasesize = type->erasesize;
4558 mtd->oobsize = type->oobsize;
4560 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* Table stores chip size in MiB; convert to bytes. */
4561 chip->chipsize = (uint64_t)type->chipsize << 20;
4562 chip->options |= type->options;
4563 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
4564 chip->ecc_step_ds = NAND_ECC_STEP(type);
4565 chip->onfi_timing_mode_default =
4566 type->onfi_timing_mode_default;
/* Duplicate the model name; freed later by the ident cleanup path. */
4568 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4569 if (!chip->parameters.model)
4578 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4579 * compliant and does not have a full-id or legacy-id entry in the nand_ids
4582 static void nand_manufacturer_detect(struct nand_chip *chip)
4585 * Try manufacturer detection if available and use
4586 * nand_decode_ext_id() otherwise.
4588 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4589 chip->manufacturer.desc->ops->detect) {
4590 /* The 3rd id byte holds MLC / multichip data */
4591 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4592 chip->manufacturer.desc->ops->detect(chip);
4594 nand_decode_ext_id(chip);
4599 * Manufacturer initialization. This function is called for all NANDs including
4600 * ONFI and JEDEC compliant ones.
4601 * Manufacturer drivers should put all their specific initialization code in
4602 * their ->init() hook.
4604 static int nand_manufacturer_init(struct nand_chip *chip)
/* No ->init() hook is fine; only call it when the full chain exists. */
4606 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4607 !chip->manufacturer.desc->ops->init)
4610 return chip->manufacturer.desc->ops->init(chip);
4614 * Manufacturer cleanup. This function is called for all NANDs including
4615 * ONFI and JEDEC compliant ones.
4616 * Manufacturer drivers should put all their specific cleanup code in their
4619 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4621 /* Release manufacturer private data */
4622 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4623 chip->manufacturer.desc->ops->cleanup)
4624 chip->manufacturer.desc->ops->cleanup(chip);
/* Human-readable manufacturer name, or "Unknown" for a NULL descriptor.
 * NOTE(review): the return-type line of this definition is elided in this
 * excerpt (gap in embedded numbering before 4628). */
4628 nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4630 return manufacturer ? manufacturer->name : "Unknown";
4634 * Get the flash and manufacturer id and lookup if the type is supported.
/* NOTE(review): this excerpt elides a number of original lines (loop/branch
 * closers, some assignments such as reading "extid"/"busw"); the embedded
 * line numbers show where. */
4636 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4638 const struct nand_manufacturer *manufacturer;
4639 struct mtd_info *mtd = nand_to_mtd(chip);
4641 u8 *id_data = chip->id.data;
4645 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4648 ret = nand_reset(chip, 0);
4652 /* Select the device */
4653 nand_select_target(chip, 0);
4655 /* Send the command for reading device ID */
4656 ret = nand_readid_op(chip, 0, id_data, 2);
4660 /* Read manufacturer and device IDs */
4661 maf_id = id_data[0];
4662 dev_id = id_data[1];
4665 * Try again to make sure, as some systems the bus-hold or other
4666 * interface concerns can cause random data which looks like a
4667 * possibly credible NAND flash to appear. If the two results do
4668 * not match, ignore the device completely.
4671 /* Read entire ID string */
4672 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
4676 if (id_data[0] != maf_id || id_data[1] != dev_id) {
4677 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4678 maf_id, dev_id, id_data[0], id_data[1]);
4682 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
4684 /* Try to identify manufacturer */
4685 manufacturer = nand_get_manufacturer(maf_id);
4686 chip->manufacturer.desc = manufacturer;
/* No caller-supplied table: fall back to the built-in nand_flash_ids. */
4689 type = nand_flash_ids;
4692 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4694 * This is required to make sure initial NAND bus width set by the
4695 * NAND controller driver is coherent with the real NAND bus width
4696 * (extracted by auto-detection code).
4698 busw = chip->options & NAND_BUSWIDTH_16;
4701 * The flag is only set (never cleared), reset it to its default value
4702 * before starting auto-detection.
4704 chip->options &= ~NAND_BUSWIDTH_16;
/* Walk the ID table: full-id entries match the whole ID string, legacy
 * entries match on the device ID byte only. */
4706 for (; type->name != NULL; type++) {
4707 if (is_full_id_nand(type)) {
4708 if (find_full_id_nand(chip, type))
4710 } else if (dev_id == type->dev_id) {
4715 if (!type->name || !type->pagesize) {
4716 /* Check if the chip is ONFI compliant */
4717 ret = nand_onfi_detect(chip);
4723 /* Check if the chip is JEDEC compliant */
4724 ret = nand_jedec_detect(chip);
4734 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4735 if (!chip->parameters.model)
4738 chip->chipsize = (uint64_t)type->chipsize << 20;
/* Entries without a page size require extended-ID / manufacturer decode. */
4740 if (!type->pagesize)
4741 nand_manufacturer_detect(chip);
4743 nand_decode_id(chip, type);
4745 /* Get chip options */
4746 chip->options |= type->options;
4750 mtd->name = chip->parameters.model;
4752 if (chip->options & NAND_BUSWIDTH_AUTO) {
4753 WARN_ON(busw & NAND_BUSWIDTH_16);
4754 nand_set_defaults(chip);
4755 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4757 * Check, if buswidth is correct. Hardware drivers should set
4760 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4762 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4764 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4765 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4768 goto free_detect_allocation;
4771 nand_decode_bbm_options(chip);
4773 /* Calculate the address shift from the page size */
4774 chip->page_shift = ffs(mtd->writesize) - 1;
4775 /* Convert chipsize to number of pages per chip -1 */
4776 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4778 chip->bbt_erase_shift = chip->phys_erase_shift =
4779 ffs(mtd->erasesize) - 1;
/* chip_shift = log2(chipsize); split across 32-bit halves because ffs()
 * operates on 32-bit values. */
4780 if (chip->chipsize & 0xffffffff)
4781 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4783 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4784 chip->chip_shift += 32 - 1;
/* More than 64K pages per chip needs a third row-address cycle. */
4787 if (chip->chip_shift - chip->page_shift > 16)
4788 chip->options |= NAND_ROW_ADDR_3;
4790 chip->badblockbits = 8;
4792 nand_legacy_adjust_cmdfunc(chip);
4794 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4796 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4797 chip->parameters.model);
4798 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4799 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4800 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4803 free_detect_allocation:
4804 kfree(chip->parameters.model);
/* String names for nand-ecc-mode DT property values, indexed by the
 * corresponding NAND_ECC_* enum value. */
4809 static const char * const nand_ecc_modes[] = {
4810 [NAND_ECC_NONE] = "none",
4811 [NAND_ECC_SOFT] = "soft",
4812 [NAND_ECC_HW] = "hw",
4813 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4814 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4815 [NAND_ECC_ON_DIE] = "on-die",
/* Parse the "nand-ecc-mode" DT property into a NAND_ECC_* value. */
4818 static int of_get_nand_ecc_mode(struct device_node *np)
4823 err = of_property_read_string(np, "nand-ecc-mode", &pm);
/* Case-insensitive lookup against the nand_ecc_modes[] name table. */
4827 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4828 if (!strcasecmp(pm, nand_ecc_modes[i]))
4832 * For backward compatibility we support few obsoleted values that don't
4833 * have their mappings into nand_ecc_modes_t anymore (they were merged
4834 * with other enums).
4836 if (!strcasecmp(pm, "soft_bch"))
4837 return NAND_ECC_SOFT;
/* String names for nand-ecc-algo DT property values, indexed by the
 * corresponding NAND_ECC_* algorithm enum value. */
4842 static const char * const nand_ecc_algos[] = {
4843 [NAND_ECC_HAMMING] = "hamming",
4844 [NAND_ECC_BCH] = "bch",
4845 [NAND_ECC_RS] = "rs",
/* Parse the "nand-ecc-algo" DT property into a NAND_ECC_* algorithm. */
4848 static int of_get_nand_ecc_algo(struct device_node *np)
4853 err = of_property_read_string(np, "nand-ecc-algo", &pm)
4855 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4856 if (!strcasecmp(pm, nand_ecc_algos[i]))
4862 * For backward compatibility we also read "nand-ecc-mode" checking
4863 * for some obsoleted values that were specifying ECC algorithm.
4865 err = of_property_read_string(np, "nand-ecc-mode", &pm);
/* Legacy mapping: old "soft"/"soft_bch" mode values implied an algorithm. */
4869 if (!strcasecmp(pm, "soft"))
4870 return NAND_ECC_HAMMING;
4871 else if (!strcasecmp(pm, "soft_bch"))
4872 return NAND_ECC_BCH;
/* Read "nand-ecc-step-size" from DT; returns the value, or a negative
 * errno from of_property_read_u32() when the property is absent/invalid. */
4877 static int of_get_nand_ecc_step_size(struct device_node *np)
4882 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4883 return ret ? ret : val;
/* Read "nand-ecc-strength" from DT; returns the value, or a negative
 * errno from of_property_read_u32() when the property is absent/invalid. */
4886 static int of_get_nand_ecc_strength(struct device_node *np)
4891 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4892 return ret ? ret : val;
/* Read "nand-bus-width" from DT. NOTE(review): the success path and the
 * value validation are elided in this excerpt. */
4895 static int of_get_nand_bus_width(struct device_node *np)
4899 if (of_property_read_u32(np, "nand-bus-width", &val))
/* True when the DT marks the BBT as stored on flash ("nand-on-flash-bbt"). */
4911 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4913 return of_property_read_bool(np, "nand-on-flash-bbt");
/* Apply device-tree configuration (bus width, boot medium, BBT placement,
 * ECC mode/algo/strength/step) to the chip before detection. */
4916 static int nand_dt_init(struct nand_chip *chip)
4918 struct device_node *dn = nand_get_flash_node(chip);
4919 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4924 if (of_get_nand_bus_width(dn) == 16)
4925 chip->options |= NAND_BUSWIDTH_16;
4927 if (of_property_read_bool(dn, "nand-is-boot-medium"))
4928 chip->options |= NAND_IS_BOOT_MEDIUM;
4930 if (of_get_nand_on_flash_bbt(dn))
4931 chip->bbt_options |= NAND_BBT_USE_FLASH;
4933 ecc_mode = of_get_nand_ecc_mode(dn);
4934 ecc_algo = of_get_nand_ecc_algo(dn);
4935 ecc_strength = of_get_nand_ecc_strength(dn);
4936 ecc_step = of_get_nand_ecc_step_size(dn);
/* Only override chip defaults with values the DT actually provided
 * (the helpers return negative errno when a property is absent). */
4939 chip->ecc.mode = ecc_mode;
4942 chip->ecc.algo = ecc_algo;
4944 if (ecc_strength >= 0)
4945 chip->ecc.strength = ecc_strength;
4948 chip->ecc.size = ecc_step;
4950 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4951 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4957 * nand_scan_ident - Scan for the NAND device
4958 * @chip: NAND chip object
4959 * @maxchips: number of chips to scan for
4960 * @table: alternative NAND ID table
4962 * This is the first phase of the normal nand_scan() function. It reads the
4963 * flash ID and sets up MTD fields accordingly.
4965 * This helper used to be called directly from controller drivers that needed
4966 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
4967 * prevented dynamic allocations during this phase which was inconvenient and
4968 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
4970 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
4971 struct nand_flash_dev *table)
4973 struct mtd_info *mtd = nand_to_mtd(chip);
4974 int nand_maf_id, nand_dev_id;
4978 /* Assume all dies are deselected when we enter nand_scan_ident(). */
4981 mutex_init(&chip->lock);
4983 /* Enforce the right timings for reset/detection */
4984 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
4986 ret = nand_dt_init(chip);
/* Fall back to the parent device name when the controller set none. */
4990 if (!mtd->name && mtd->dev.parent)
4991 mtd->name = dev_name(mtd->dev.parent);
4994 * Start with chips->numchips = maxchips to let nand_select_target() do
4995 * its job. chip->numchips will be adjusted after.
4997 chip->numchips = maxchips;
4999 /* Set the default functions */
5000 nand_set_defaults(chip);
5002 ret = nand_legacy_check_hooks(chip);
5006 /* Read the flash type */
5007 ret = nand_detect(chip, table);
5009 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5010 pr_warn("No NAND device found\n");
5011 nand_deselect_target(chip);
5015 nand_maf_id = chip->id.data[0];
5016 nand_dev_id = chip->id.data[1];
5018 nand_deselect_target(chip);
5020 /* Check for a chip array */
5021 for (i = 1; i < maxchips; i++) {
5024 /* See comment in nand_get_flash_type for reset */
5025 ret = nand_reset(chip, i);
5029 nand_select_target(chip, i);
5030 /* Send the command for reading device ID */
5031 ret = nand_readid_op(chip, 0, id, sizeof(id));
5034 /* Read manufacturer and device IDs */
/* A die with a different ID ends the contiguous chip array. */
5035 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5036 nand_deselect_target(chip);
5039 nand_deselect_target(chip);
5042 pr_info("%d chips detected\n", i);
5044 /* Store the number of chips and calc total size for mtd */
5046 mtd->size = i * chip->chipsize;
/* Free the allocations made during nand_scan_ident() (model string and
 * ONFI parameter page copy). */
5051 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5053 kfree(chip->parameters.model);
5054 kfree(chip->parameters.onfi);
/* Wire up the software-ECC hooks (Hamming or BCH) on the chip's ecc
 * control structure. Must only be called with ecc->mode == NAND_ECC_SOFT. */
5057 static int nand_set_ecc_soft_ops(struct nand_chip *chip)
5059 struct mtd_info *mtd = nand_to_mtd(chip);
5060 struct nand_ecc_ctrl *ecc = &chip->ecc;
5062 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
5065 switch (ecc->algo) {
5066 case NAND_ECC_HAMMING:
5067 ecc->calculate = nand_calculate_ecc;
5068 ecc->correct = nand_correct_data;
5069 ecc->read_page = nand_read_page_swecc;
5070 ecc->read_subpage = nand_read_subpage;
5071 ecc->write_page = nand_write_page_swecc;
5072 ecc->read_page_raw = nand_read_page_raw;
5073 ecc->write_page_raw = nand_write_page_raw;
5074 ecc->read_oob = nand_read_oob_std;
5075 ecc->write_oob = nand_write_oob_std;
/* Smart Media byte order for Hamming ECC when the config selects it. */
5081 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
5082 ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
/* BCH path requires the soft-BCH library to be built in. */
5086 if (!mtd_nand_has_bch()) {
5087 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
5090 ecc->calculate = nand_bch_calculate_ecc;
5091 ecc->correct = nand_bch_correct_data;
5092 ecc->read_page = nand_read_page_swecc;
5093 ecc->read_subpage = nand_read_subpage;
5094 ecc->write_page = nand_write_page_swecc;
5095 ecc->read_page_raw = nand_read_page_raw;
5096 ecc->write_page_raw = nand_write_page_raw;
5097 ecc->read_oob = nand_read_oob_std;
5098 ecc->write_oob = nand_write_oob_std;
5101 * Board driver should supply ecc.size and ecc.strength
5102 * values to select how many bits are correctable.
5103 * Otherwise, default to 4 bits for large page devices.
5105 if (!ecc->size && (mtd->oobsize >= 64)) {
5111 * if no ecc placement scheme was provided pickup the default
5114 if (!mtd->ooblayout) {
5115 /* handle large page devices only */
5116 if (mtd->oobsize < 64) {
5117 WARN(1, "OOB layout is required when using software BCH on small pages\n")
5121 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
5126 * We can only maximize ECC config when the default layout is
5127 * used, otherwise we don't know how many bytes can really be
5130 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
5131 ecc->options & NAND_ECC_MAXIMIZE) {
5134 /* Always prefer 1k blocks over 512bytes ones */
5136 steps = mtd->writesize / ecc->size;
5138 /* Reserve 2 bytes for the BBM */
5139 bytes = (mtd->oobsize - 2) / steps;
5140 ecc->strength = bytes * 8 / fls(8 * ecc->size);
5143 /* See nand_bch_init() for details. */
5145 ecc->priv = nand_bch_init(mtd);
5147 WARN(1, "BCH ECC initialization failed!\n");
5152 WARN(1, "Unsupported ECC algorithm!\n");
5158 * nand_check_ecc_caps - check the sanity of preset ECC settings
5159 * @chip: nand chip info structure
5160 * @caps: ECC caps info structure
5161 * @oobavail: OOB size that the ECC engine can use
5163 * When ECC step size and strength are already set, check if they are supported
5164 * by the controller and the calculated ECC bytes fit within the chip's OOB.
5165 * On success, the calculated ECC bytes is set.
5168 nand_check_ecc_caps(struct nand_chip *chip,
5169 const struct nand_ecc_caps *caps, int oobavail)
5171 struct mtd_info *mtd = nand_to_mtd(chip);
5172 const struct nand_ecc_step_info *stepinfo;
5173 int preset_step = chip->ecc.size;
5174 int preset_strength = chip->ecc.strength;
5175 int ecc_bytes, nsteps = mtd->writesize / preset_step;
/* Search controller capabilities for the exact (step, strength) pair. */
5178 for (i = 0; i < caps->nstepinfos; i++) {
5179 stepinfo = &caps->stepinfos[i];
5181 if (stepinfo->stepsize != preset_step)
5184 for (j = 0; j < stepinfo->nstrengths; j++) {
5185 if (stepinfo->strengths[j] != preset_strength)
5188 ecc_bytes = caps->calc_ecc_bytes(preset_step,
5190 if (WARN_ON_ONCE(ecc_bytes < 0))
/* Pair is supported, but total ECC bytes must still fit in OOB. */
5193 if (ecc_bytes * nsteps > oobavail) {
5194 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5195 preset_step, preset_strength);
5199 chip->ecc.bytes = ecc_bytes;
5205 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5206 preset_step, preset_strength);
5212 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5213 * @chip: nand chip info structure
5214 * @caps: ECC engine caps info structure
5215 * @oobavail: OOB size that the ECC engine can use
5217 * If a chip's ECC requirement is provided, try to meet it with the least
5218 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5219 * On success, the chosen ECC settings are set.
5222 nand_match_ecc_req(struct nand_chip *chip,
5223 const struct nand_ecc_caps *caps, int oobavail)
5225 struct mtd_info *mtd = nand_to_mtd(chip);
5226 const struct nand_ecc_step_info *stepinfo;
5227 int req_step = chip->ecc_step_ds;
5228 int req_strength = chip->ecc_strength_ds;
5229 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5230 int best_step, best_strength, best_ecc_bytes;
/* INT_MAX sentinel: "no candidate found yet". */
5231 int best_ecc_bytes_total = INT_MAX;
5234 /* No information provided by the NAND chip */
5235 if (!req_step || !req_strength)
5238 /* number of correctable bits the chip requires in a page */
5239 req_corr = mtd->writesize / req_step * req_strength;
5241 for (i = 0; i < caps->nstepinfos; i++) {
5242 stepinfo = &caps->stepinfos[i];
5243 step_size = stepinfo->stepsize;
5245 for (j = 0; j < stepinfo->nstrengths; j++) {
5246 strength = stepinfo->strengths[j];
5249 * If both step size and strength are smaller than the
5250 * chip's requirement, it is not easy to compare the
5251 * resulting reliability.
5253 if (step_size < req_step && strength < req_strength)
/* Step size must divide the page size evenly. */
5256 if (mtd->writesize % step_size)
5259 nsteps = mtd->writesize / step_size;
5261 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5262 if (WARN_ON_ONCE(ecc_bytes < 0))
5264 ecc_bytes_total = ecc_bytes * nsteps;
/* Must fit in OOB and meet the per-page correctability target. */
5266 if (ecc_bytes_total > oobavail ||
5267 strength * nsteps < req_corr)
5271 * We assume the best is to meet the chip's requirement
5272 * with the least number of ECC bytes.
5274 if (ecc_bytes_total < best_ecc_bytes_total) {
5275 best_ecc_bytes_total = ecc_bytes_total;
5276 best_step = step_size;
5277 best_strength = strength;
5278 best_ecc_bytes = ecc_bytes;
5283 if (best_ecc_bytes_total == INT_MAX)
5286 chip->ecc.size = best_step;
5287 chip->ecc.strength = best_strength;
5288 chip->ecc.bytes = best_ecc_bytes;
5294 * nand_maximize_ecc - choose the max ECC strength available
5295 * @chip: nand chip info structure
5296 * @caps: ECC engine caps info structure
5297 * @oobavail: OOB size that the ECC engine can use
5299 * Choose the max ECC strength that is supported on the controller, and can fit
5300 * within the chip's OOB. On success, the chosen ECC settings are set.
5303 nand_maximize_ecc(struct nand_chip *chip,
5304 const struct nand_ecc_caps *caps, int oobavail)
5306 struct mtd_info *mtd = nand_to_mtd(chip);
5307 const struct nand_ecc_step_info *stepinfo;
5308 int step_size, strength, nsteps, ecc_bytes, corr;
5311 int best_strength, best_ecc_bytes;
5314 for (i = 0; i < caps->nstepinfos; i++) {
5315 stepinfo = &caps->stepinfos[i];
5316 step_size = stepinfo->stepsize;
5318 /* If chip->ecc.size is already set, respect it */
5319 if (chip->ecc.size && step_size != chip->ecc.size)
5322 for (j = 0; j < stepinfo->nstrengths; j++) {
5323 strength = stepinfo->strengths[j];
/* Step size must divide the page size evenly. */
5325 if (mtd->writesize % step_size)
5328 nsteps = mtd->writesize / step_size;
5330 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5331 if (WARN_ON_ONCE(ecc_bytes < 0))
/* Skip configurations whose ECC bytes overflow the OOB budget. */
5334 if (ecc_bytes * nsteps > oobavail)
/* Total correctable bits per page for this candidate. */
5337 corr = strength * nsteps;
5340 * If the number of correctable bits is the same,
5341 * bigger step_size has more reliability.
5343 if (corr > best_corr ||
5344 (corr == best_corr && step_size > best_step)) {
5346 best_step = step_size;
5347 best_strength = strength;
5348 best_ecc_bytes = ecc_bytes;
5356 chip->ecc.size = best_step;
5357 chip->ecc.strength = best_strength;
5358 chip->ecc.bytes = best_ecc_bytes;
5364 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5365 * @chip: nand chip info structure
5366 * @caps: ECC engine caps info structure
5367 * @oobavail: OOB size that the ECC engine can use
5369 * Choose the ECC configuration according to following logic
5371 * 1. If both ECC step size and ECC strength are already set (usually by DT)
5372 * then check if it is supported by this controller.
5373 * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5374 * 3. Otherwise, try to match the ECC step size and ECC strength closest
5375 * to the chip's requirement. If available OOB size can't fit the chip
5376 * requirement then fallback to the maximum ECC step size and ECC strength.
5378 * On success, the chosen ECC settings are set.
5380 int nand_ecc_choose_conf(struct nand_chip *chip,
5381 const struct nand_ecc_caps *caps, int oobavail)
5383 struct mtd_info *mtd = nand_to_mtd(chip);
5385 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5388 if (chip->ecc.size && chip->ecc.strength)
5389 return nand_check_ecc_caps(chip, caps, oobavail);
5391 if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5392 return nand_maximize_ecc(chip, caps, oobavail);
5394 if (!nand_match_ecc_req(chip, caps, oobavail))
5397 return nand_maximize_ecc(chip, caps, oobavail);
5399 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5402 * Check if the chip configuration meet the datasheet requirements.
5404 * If our configuration corrects A bits per B bytes and the minimum
5405 * required correction level is X bits per Y bytes, then we must ensure
5406 * both of the following are true:
5408 * (1) A / B >= X / Y
5411 * Requirement (1) ensures we can correct for the required bitflip density.
5412 * Requirement (2) ensures we can correct even when all bitflips are clumped
5413 * in the same sector.
5415 static bool nand_ecc_strength_good(struct nand_chip *chip)
5417 struct mtd_info *mtd = nand_to_mtd(chip);
5418 struct nand_ecc_ctrl *ecc = &chip->ecc;
5421 if (ecc->size == 0 || chip->ecc_step_ds == 0)
5422 /* Not enough information */
5426 * We get the number of corrected bits per page to compare
5427 * the correction density.
5429 corr = (mtd->writesize * ecc->strength) / ecc->size;
5430 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
5432 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
5436 * nand_scan_tail - Scan for the NAND device
5437 * @chip: NAND chip object
5439 * This is the second phase of the normal nand_scan() function. It fills out
5440 * all the uninitialized function pointers with the defaults and scans for a
5441 * bad block table if appropriate.
5443 static int nand_scan_tail(struct nand_chip *chip)
5445 struct mtd_info *mtd = nand_to_mtd(chip);
5446 struct nand_ecc_ctrl *ecc = &chip->ecc;
5449 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
5450 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5451 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5455 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5456 if (!chip->data_buf)
5460 * FIXME: some NAND manufacturer drivers expect the first die to be
5461 * selected when manufacturer->init() is called. They should be fixed
5462 * to explictly select the relevant die when interacting with the NAND
5465 nand_select_target(chip, 0);
5466 ret = nand_manufacturer_init(chip);
5467 nand_deselect_target(chip);
5471 /* Set the internal oob buffer location, just after the page data */
5472 chip->oob_poi = chip->data_buf + mtd->writesize;
5475 * If no default placement scheme is given, select an appropriate one.
5477 if (!mtd->ooblayout &&
5478 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
5479 switch (mtd->oobsize) {
5482 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
5486 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
5490 * Expose the whole OOB area to users if ECC_NONE
5491 * is passed. We could do that for all kind of
5492 * ->oobsize, but we must keep the old large/small
5493 * page with ECC layout when ->oobsize <= 128 for
5494 * compatibility reasons.
5496 if (ecc->mode == NAND_ECC_NONE) {
5497 mtd_set_ooblayout(mtd,
5498 &nand_ooblayout_lp_ops);
5502 WARN(1, "No oob scheme defined for oobsize %d\n",
5505 goto err_nand_manuf_cleanup;
5510 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
5511 * selected and we have 256 byte pagesize fallback to software ECC
5514 switch (ecc->mode) {
5515 case NAND_ECC_HW_OOB_FIRST:
5516 /* Similar to NAND_ECC_HW, but a separate read_page handle */
5517 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5518 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5520 goto err_nand_manuf_cleanup;
5522 if (!ecc->read_page)
5523 ecc->read_page = nand_read_page_hwecc_oob_first;
5527 /* Use standard hwecc read page function? */
5528 if (!ecc->read_page)
5529 ecc->read_page = nand_read_page_hwecc;
5530 if (!ecc->write_page)
5531 ecc->write_page = nand_write_page_hwecc;
5532 if (!ecc->read_page_raw)
5533 ecc->read_page_raw = nand_read_page_raw;
5534 if (!ecc->write_page_raw)
5535 ecc->write_page_raw = nand_write_page_raw;
5537 ecc->read_oob = nand_read_oob_std;
5538 if (!ecc->write_oob)
5539 ecc->write_oob = nand_write_oob_std;
5540 if (!ecc->read_subpage)
5541 ecc->read_subpage = nand_read_subpage;
5542 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5543 ecc->write_subpage = nand_write_subpage_hwecc;
5546 case NAND_ECC_HW_SYNDROME:
5547 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5549 ecc->read_page == nand_read_page_hwecc ||
5551 ecc->write_page == nand_write_page_hwecc)) {
5552 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5554 goto err_nand_manuf_cleanup;
5556 /* Use standard syndrome read/write page function? */
5557 if (!ecc->read_page)
5558 ecc->read_page = nand_read_page_syndrome;
5559 if (!ecc->write_page)
5560 ecc->write_page = nand_write_page_syndrome;
5561 if (!ecc->read_page_raw)
5562 ecc->read_page_raw = nand_read_page_raw_syndrome;
5563 if (!ecc->write_page_raw)
5564 ecc->write_page_raw = nand_write_page_raw_syndrome;
5566 ecc->read_oob = nand_read_oob_syndrome;
5567 if (!ecc->write_oob)
5568 ecc->write_oob = nand_write_oob_syndrome;
5570 if (mtd->writesize >= ecc->size) {
5571 if (!ecc->strength) {
5572 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5574 goto err_nand_manuf_cleanup;
5578 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5579 ecc->size, mtd->writesize);
5580 ecc->mode = NAND_ECC_SOFT;
5581 ecc->algo = NAND_ECC_HAMMING;
5585 ret = nand_set_ecc_soft_ops(chip);
5588 goto err_nand_manuf_cleanup;
5592 case NAND_ECC_ON_DIE:
5593 if (!ecc->read_page || !ecc->write_page) {
5594 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5596 goto err_nand_manuf_cleanup;
5599 ecc->read_oob = nand_read_oob_std;
5600 if (!ecc->write_oob)
5601 ecc->write_oob = nand_write_oob_std;
5605 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5606 ecc->read_page = nand_read_page_raw;
5607 ecc->write_page = nand_write_page_raw;
5608 ecc->read_oob = nand_read_oob_std;
5609 ecc->read_page_raw = nand_read_page_raw;
5610 ecc->write_page_raw = nand_write_page_raw;
5611 ecc->write_oob = nand_write_oob_std;
5612 ecc->size = mtd->writesize;
5618 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5620 goto err_nand_manuf_cleanup;
5623 if (ecc->correct || ecc->calculate) {
5624 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5625 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5626 if (!ecc->calc_buf || !ecc->code_buf) {
5628 goto err_nand_manuf_cleanup;
5632 /* For many systems, the standard OOB write also works for raw */
5633 if (!ecc->read_oob_raw)
5634 ecc->read_oob_raw = ecc->read_oob;
5635 if (!ecc->write_oob_raw)
5636 ecc->write_oob_raw = ecc->write_oob;
5638 /* propagate ecc info to mtd_info */
5639 mtd->ecc_strength = ecc->strength;
5640 mtd->ecc_step_size = ecc->size;
5643 * Set the number of read / write steps for one page depending on ECC
5646 ecc->steps = mtd->writesize / ecc->size;
5647 if (ecc->steps * ecc->size != mtd->writesize) {
5648 WARN(1, "Invalid ECC parameters\n");
5650 goto err_nand_manuf_cleanup;
5652 ecc->total = ecc->steps * ecc->bytes;
5653 if (ecc->total > mtd->oobsize) {
5654 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5656 goto err_nand_manuf_cleanup;
5660 * The number of bytes available for a client to place data into
5661 * the out of band area.
5663 ret = mtd_ooblayout_count_freebytes(mtd);
5667 mtd->oobavail = ret;
5669 /* ECC sanity check: warn if it's too weak */
5670 if (!nand_ecc_strength_good(chip))
5671 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5674 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5675 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5676 switch (ecc->steps) {
5678 mtd->subpage_sft = 1;
5683 mtd->subpage_sft = 2;
5687 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5689 /* Invalidate the pagebuffer reference */
5692 /* Large page NAND with SOFT_ECC should support subpage reads */
5693 switch (ecc->mode) {
5695 if (chip->page_shift > 9)
5696 chip->options |= NAND_SUBPAGE_READ;
5703 /* Fill in remaining MTD driver data */
5704 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
5705 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
5707 mtd->_erase = nand_erase;
5709 mtd->_unpoint = NULL;
5710 mtd->_panic_write = panic_nand_write;
5711 mtd->_read_oob = nand_read_oob;
5712 mtd->_write_oob = nand_write_oob;
5713 mtd->_sync = nand_sync;
5715 mtd->_unlock = NULL;
5716 mtd->_suspend = nand_suspend;
5717 mtd->_resume = nand_resume;
5718 mtd->_reboot = nand_shutdown;
5719 mtd->_block_isreserved = nand_block_isreserved;
5720 mtd->_block_isbad = nand_block_isbad;
5721 mtd->_block_markbad = nand_block_markbad;
5722 mtd->_max_bad_blocks = nand_max_bad_blocks;
5723 mtd->writebufsize = mtd->writesize;
5726 * Initialize bitflip_threshold to its default prior scan_bbt() call.
5727 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5730 if (!mtd->bitflip_threshold)
5731 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5733 /* Initialize the ->data_interface field. */
5734 ret = nand_init_data_interface(chip);
5736 goto err_nand_manuf_cleanup;
5738 /* Enter fastest possible mode on all dies. */
5739 for (i = 0; i < chip->numchips; i++) {
5740 ret = nand_setup_data_interface(chip, i);
5742 goto err_nand_manuf_cleanup;
5745 /* Check, if we should skip the bad block table scan */
5746 if (chip->options & NAND_SKIP_BBTSCAN)
5749 /* Build bad block table */
5750 ret = nand_create_bbt(chip);
5752 goto err_nand_manuf_cleanup;
5757 err_nand_manuf_cleanup:
5758 nand_manufacturer_cleanup(chip);
5761 kfree(chip->data_buf);
5762 kfree(ecc->code_buf);
5763 kfree(ecc->calc_buf);
5768 static int nand_attach(struct nand_chip *chip)
5770 if (chip->controller->ops && chip->controller->ops->attach_chip)
5771 return chip->controller->ops->attach_chip(chip);
5776 static void nand_detach(struct nand_chip *chip)
5778 if (chip->controller->ops && chip->controller->ops->detach_chip)
5779 chip->controller->ops->detach_chip(chip);
5783 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5784 * @chip: NAND chip object
5785 * @maxchips: number of chips to scan for.
5786 * @ids: optional flash IDs table
5788 * This fills out all the uninitialized function pointers with the defaults.
5789 * The flash ID is read and the mtd/chip structures are filled with the
5790 * appropriate values.
5792 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5793 struct nand_flash_dev *ids)
5800 ret = nand_scan_ident(chip, maxchips, ids);
5804 ret = nand_attach(chip);
5808 ret = nand_scan_tail(chip);
5817 nand_scan_ident_cleanup(chip);
5821 EXPORT_SYMBOL(nand_scan_with_ids);
5824 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5825 * @chip: NAND chip object
5827 void nand_cleanup(struct nand_chip *chip)
5829 if (chip->ecc.mode == NAND_ECC_SOFT &&
5830 chip->ecc.algo == NAND_ECC_BCH)
5831 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5833 /* Free bad block table memory */
5835 kfree(chip->data_buf);
5836 kfree(chip->ecc.code_buf);
5837 kfree(chip->ecc.calc_buf);
5839 /* Free bad block descriptor memory */
5840 if (chip->badblock_pattern && chip->badblock_pattern->options
5841 & NAND_BBT_DYNAMICSTRUCT)
5842 kfree(chip->badblock_pattern);
5844 /* Free manufacturer priv data. */
5845 nand_manufacturer_cleanup(chip);
5847 /* Free controller specific allocations after chip identification */
5850 /* Free identification phase allocations */
5851 nand_scan_ident_cleanup(chip);
5854 EXPORT_SYMBOL_GPL(nand_cleanup);
/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 *		  held by the NAND device
 * @chip: NAND chip object
 */
void nand_release(struct nand_chip *chip)
{
	mtd_device_unregister(nand_to_mtd(chip));
	nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);
5868 MODULE_LICENSE("GPL");
5869 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5870 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5871 MODULE_DESCRIPTION("Generic NAND flash driver code");