// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the generic MTD driver for NAND flash devices. It should be
 * capable of working with almost all NAND chips currently available.
 *
 * Additional technical information is available on
 * http://www.linux-mtd.infradead.org/doc/nand.html
 *
 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 * Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 * TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 */
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/err.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
35 #include <linux/types.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/nand-ecc-sw-hamming.h>
39 #include <linux/mtd/nand-ecc-sw-bch.h>
40 #include <linux/interrupt.h>
41 #include <linux/bitops.h>
43 #include <linux/mtd/partitions.h>
45 #include <linux/of_gpio.h>
46 #include <linux/gpio/consumer.h>
48 #include "internals.h"
50 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
51 struct mtd_pairing_info *info)
53 int lastpage = (mtd->erasesize / mtd->writesize) - 1;
59 if (!page || (page & 1)) {
61 info->pair = (page + 1) / 2;
64 info->pair = (page + 1 - dist) / 2;
70 static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
71 const struct mtd_pairing_info *info)
73 int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
74 int page = info->pair * 2;
77 if (!info->group && !info->pair)
80 if (info->pair == lastpair && info->group)
88 if (page >= mtd->erasesize / mtd->writesize)
94 const struct mtd_pairing_scheme dist3_pairing_scheme = {
96 .get_info = nand_pairing_dist3_get_info,
97 .get_wunit = nand_pairing_dist3_get_wunit,
100 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
104 /* Start address must align on block boundary */
105 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
106 pr_debug("%s: unaligned address\n", __func__);
110 /* Length must align on block boundary */
111 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
112 pr_debug("%s: length not block aligned\n", __func__);
120 * nand_extract_bits - Copy unaligned bits from one buffer to another one
121 * @dst: destination buffer
122 * @dst_off: bit offset at which the writing starts
123 * @src: source buffer
124 * @src_off: bit offset at which the reading starts
125 * @nbits: number of bits to copy from @src to @dst
127 * Copy bits from one memory region to another (overlap authorized).
129 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
130 unsigned int src_off, unsigned int nbits)
140 n = min3(8 - dst_off, 8 - src_off, nbits);
142 tmp = (*src >> src_off) & GENMASK(n - 1, 0);
143 *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
144 *dst |= tmp << dst_off;
161 EXPORT_SYMBOL_GPL(nand_extract_bits);
164 * nand_select_target() - Select a NAND target (A.K.A. die)
165 * @chip: NAND chip object
166 * @cs: the CS line to select. Note that this CS id is always from the chip
167 * PoV, not the controller one
169 * Select a NAND target so that further operations executed on @chip go to the
170 * selected NAND target.
172 void nand_select_target(struct nand_chip *chip, unsigned int cs)
175 * cs should always lie between 0 and nanddev_ntargets(), when that's
176 * not the case it's a bug and the caller should be fixed.
178 if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
183 if (chip->legacy.select_chip)
184 chip->legacy.select_chip(chip, cs);
186 EXPORT_SYMBOL_GPL(nand_select_target);
189 * nand_deselect_target() - Deselect the currently selected target
190 * @chip: NAND chip object
192 * Deselect the currently selected NAND target. The result of operations
193 * executed on @chip after the target has been deselected is undefined.
195 void nand_deselect_target(struct nand_chip *chip)
197 if (chip->legacy.select_chip)
198 chip->legacy.select_chip(chip, -1);
202 EXPORT_SYMBOL_GPL(nand_deselect_target);
205 * nand_release_device - [GENERIC] release chip
206 * @chip: NAND chip object
208 * Release chip lock and wake up anyone waiting on the device.
210 static void nand_release_device(struct nand_chip *chip)
212 /* Release the controller and the chip */
213 mutex_unlock(&chip->controller->lock);
214 mutex_unlock(&chip->lock);
218 * nand_bbm_get_next_page - Get the next page for bad block markers
219 * @chip: NAND chip object
220 * @page: First page to start checking for bad block marker usage
222 * Returns an integer that corresponds to the page offset within a block, for
223 * a page that is used to store bad block markers. If no more pages are
224 * available, -EINVAL is returned.
226 int nand_bbm_get_next_page(struct nand_chip *chip, int page)
228 struct mtd_info *mtd = nand_to_mtd(chip);
229 int last_page = ((mtd->erasesize - mtd->writesize) >>
230 chip->page_shift) & chip->pagemask;
231 unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
234 if (page == 0 && !(chip->options & bbm_flags))
236 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
238 if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
240 if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
247 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
248 * @chip: NAND chip object
249 * @ofs: offset from device start
251 * Check, if the block is bad.
253 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
255 int first_page, page_offset;
259 first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
260 page_offset = nand_bbm_get_next_page(chip, 0);
262 while (page_offset >= 0) {
263 res = chip->ecc.read_oob(chip, first_page + page_offset);
267 bad = chip->oob_poi[chip->badblockpos];
269 if (likely(chip->badblockbits == 8))
272 res = hweight8(bad) < chip->badblockbits;
276 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
283 * nand_region_is_secured() - Check if the region is secured
284 * @chip: NAND chip object
285 * @offset: Offset of the region to check
286 * @size: Size of the region to check
288 * Checks if the region is secured by comparing the offset and size with the
289 * list of secure regions obtained from DT. Returns true if the region is
290 * secured else false.
292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
296 /* Skip touching the secure regions if present */
297 for (i = 0; i < chip->nr_secure_regions; i++) {
298 const struct nand_secure_region *region = &chip->secure_regions[i];
300 if (offset + size <= region->offset ||
301 offset >= region->offset + region->size)
304 pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
305 __func__, offset, offset + size);
313 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
315 struct mtd_info *mtd = nand_to_mtd(chip);
317 if (chip->options & NAND_NO_BBM_QUIRK)
320 /* Check if the region is secured */
321 if (nand_region_is_secured(chip, ofs, mtd->erasesize))
324 if (mtd_check_expert_analysis_mode())
327 if (chip->legacy.block_bad)
328 return chip->legacy.block_bad(chip, ofs);
330 return nand_block_bad(chip, ofs);
334 * nand_get_device - [GENERIC] Get chip for selected access
335 * @chip: NAND chip structure
337 * Lock the device and its controller for exclusive access
339 * Return: -EBUSY if the chip has been suspended, 0 otherwise
341 static void nand_get_device(struct nand_chip *chip)
343 /* Wait until the device is resumed. */
345 mutex_lock(&chip->lock);
346 if (!chip->suspended) {
347 mutex_lock(&chip->controller->lock);
350 mutex_unlock(&chip->lock);
352 wait_event(chip->resume_wq, !chip->suspended);
357 * nand_check_wp - [GENERIC] check if the chip is write protected
358 * @chip: NAND chip object
360 * Check, if the device is write protected. The function expects, that the
361 * device is already selected.
363 static int nand_check_wp(struct nand_chip *chip)
368 /* Broken xD cards report WP despite being writable */
369 if (chip->options & NAND_BROKEN_XD)
372 /* Check the WP bit */
373 ret = nand_status_op(chip, &status);
377 return status & NAND_STATUS_WP ? 0 : 1;
381 * nand_fill_oob - [INTERN] Transfer client buffer to oob
382 * @chip: NAND chip object
383 * @oob: oob data buffer
384 * @len: oob data write length
385 * @ops: oob ops structure
387 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
388 struct mtd_oob_ops *ops)
390 struct mtd_info *mtd = nand_to_mtd(chip);
394 * Initialise to all 0xFF, to avoid the possibility of left over OOB
395 * data from a previous OOB read.
397 memset(chip->oob_poi, 0xff, mtd->oobsize);
401 case MTD_OPS_PLACE_OOB:
403 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
406 case MTD_OPS_AUTO_OOB:
407 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
419 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
420 * @chip: NAND chip object
421 * @to: offset to write to
422 * @ops: oob operation description structure
424 * NAND write out-of-band.
426 static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
427 struct mtd_oob_ops *ops)
429 struct mtd_info *mtd = nand_to_mtd(chip);
430 int chipnr, page, status, len, ret;
432 pr_debug("%s: to = 0x%08x, len = %i\n",
433 __func__, (unsigned int)to, (int)ops->ooblen);
435 len = mtd_oobavail(mtd, ops);
437 /* Do not allow write past end of page */
438 if ((ops->ooboffs + ops->ooblen) > len) {
439 pr_debug("%s: attempt to write past end of page\n",
444 /* Check if the region is secured */
445 if (nand_region_is_secured(chip, to, ops->ooblen))
448 chipnr = (int)(to >> chip->chip_shift);
451 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
452 * of my DiskOnChip 2000 test units) will clear the whole data page too
453 * if we don't do this. I have no clue why, but I seem to have 'fixed'
454 * it in the doc2000 driver in August 1999. dwmw2.
456 ret = nand_reset(chip, chipnr);
460 nand_select_target(chip, chipnr);
462 /* Shift to get page */
463 page = (int)(to >> chip->page_shift);
465 /* Check, if it is write protected */
466 if (nand_check_wp(chip)) {
467 nand_deselect_target(chip);
471 /* Invalidate the page cache, if we write to the cached page */
472 if (page == chip->pagecache.page)
473 chip->pagecache.page = -1;
475 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
477 if (ops->mode == MTD_OPS_RAW)
478 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
480 status = chip->ecc.write_oob(chip, page & chip->pagemask);
482 nand_deselect_target(chip);
487 ops->oobretlen = ops->ooblen;
493 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
494 * @chip: NAND chip object
495 * @ofs: offset from device start
497 * This is the default implementation, which can be overridden by a hardware
498 * specific driver. It provides the details for writing a bad block marker to a
501 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
503 struct mtd_info *mtd = nand_to_mtd(chip);
504 struct mtd_oob_ops ops;
505 uint8_t buf[2] = { 0, 0 };
506 int ret = 0, res, page_offset;
508 memset(&ops, 0, sizeof(ops));
510 ops.ooboffs = chip->badblockpos;
511 if (chip->options & NAND_BUSWIDTH_16) {
512 ops.ooboffs &= ~0x01;
513 ops.len = ops.ooblen = 2;
515 ops.len = ops.ooblen = 1;
517 ops.mode = MTD_OPS_PLACE_OOB;
519 page_offset = nand_bbm_get_next_page(chip, 0);
521 while (page_offset >= 0) {
522 res = nand_do_write_oob(chip,
523 ofs + (page_offset * mtd->writesize),
529 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
536 * nand_markbad_bbm - mark a block by updating the BBM
537 * @chip: NAND chip object
538 * @ofs: offset of the block to mark bad
540 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
542 if (chip->legacy.block_markbad)
543 return chip->legacy.block_markbad(chip, ofs);
545 return nand_default_block_markbad(chip, ofs);
549 * nand_block_markbad_lowlevel - mark a block bad
550 * @chip: NAND chip object
551 * @ofs: offset from device start
553 * This function performs the generic NAND bad block marking steps (i.e., bad
554 * block table(s) and/or marker(s)). We only allow the hardware driver to
555 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
557 * We try operations in the following order:
559 * (1) erase the affected block, to allow OOB marker to be written cleanly
560 * (2) write bad block marker to OOB area of affected block (unless flag
561 * NAND_BBT_NO_OOB_BBM is present)
564 * Note that we retain the first error encountered in (2) or (3), finish the
565 * procedures, and dump the error in the end.
567 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
569 struct mtd_info *mtd = nand_to_mtd(chip);
572 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
573 struct erase_info einfo;
575 /* Attempt erase before marking OOB */
576 memset(&einfo, 0, sizeof(einfo));
578 einfo.len = 1ULL << chip->phys_erase_shift;
579 nand_erase_nand(chip, &einfo, 0);
581 /* Write bad block marker to OOB */
582 nand_get_device(chip);
584 ret = nand_markbad_bbm(chip, ofs);
585 nand_release_device(chip);
588 /* Mark block bad in BBT */
590 res = nand_markbad_bbt(chip, ofs);
596 mtd->ecc_stats.badblocks++;
602 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
603 * @mtd: MTD device structure
604 * @ofs: offset from device start
606 * Check if the block is marked as reserved.
608 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
610 struct nand_chip *chip = mtd_to_nand(mtd);
614 /* Return info from the table */
615 return nand_isreserved_bbt(chip, ofs);
619 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
620 * @chip: NAND chip object
621 * @ofs: offset from device start
622 * @allowbbt: 1, if its allowed to access the bbt area
624 * Check, if the block is bad. Either by reading the bad block table or
625 * calling of the scan function.
627 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
629 /* Return info from the table */
631 return nand_isbad_bbt(chip, ofs, allowbbt);
633 return nand_isbad_bbm(chip, ofs);
637 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
638 * @chip: NAND chip structure
639 * @timeout_ms: Timeout in ms
641 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
642 * If that does not happen whitin the specified timeout, -ETIMEDOUT is
645 * This helper is intended to be used when the controller does not have access
646 * to the NAND R/B pin.
648 * Be aware that calling this helper from an ->exec_op() implementation means
649 * ->exec_op() must be re-entrant.
651 * Return 0 if the NAND chip is ready, a negative error otherwise.
653 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
655 const struct nand_interface_config *conf;
659 if (!nand_has_exec_op(chip))
662 /* Wait tWB before polling the STATUS reg. */
663 conf = nand_get_interface_config(chip);
664 ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
666 ret = nand_status_op(chip, NULL);
671 * +1 below is necessary because if we are now in the last fraction
672 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
673 * small jiffy fraction - possibly leading to false timeout
675 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
677 ret = nand_read_data_op(chip, &status, sizeof(status), true,
682 if (status & NAND_STATUS_READY)
686 * Typical lowest execution time for a tR on most NANDs is 10us,
687 * use this as polling delay before doing something smarter (ie.
688 * deriving a delay from the timeout value, timeout_ms/ratio).
691 } while (time_before(jiffies, timeout_ms));
694 * We have to exit READ_STATUS mode in order to read real data on the
695 * bus in case the WAITRDY instruction is preceding a DATA_IN
698 nand_exit_status_op(chip);
703 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
705 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
708 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
709 * @chip: NAND chip structure
710 * @gpiod: GPIO descriptor of R/B pin
711 * @timeout_ms: Timeout in ms
713 * Poll the R/B GPIO pin until it becomes ready. If that does not happen
714 * whitin the specified timeout, -ETIMEDOUT is returned.
716 * This helper is intended to be used when the controller has access to the
717 * NAND R/B pin over GPIO.
719 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
721 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
722 unsigned long timeout_ms)
726 * Wait until R/B pin indicates chip is ready or timeout occurs.
727 * +1 below is necessary because if we are now in the last fraction
728 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
729 * small jiffy fraction - possibly leading to false timeout.
731 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
733 if (gpiod_get_value_cansleep(gpiod))
737 } while (time_before(jiffies, timeout_ms));
739 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
741 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
744 * panic_nand_wait - [GENERIC] wait until the command is done
745 * @chip: NAND chip structure
748 * Wait for command done. This is a helper function for nand_wait used when
749 * we are in interrupt context. May happen when in panic and trying to write
750 * an oops through mtdoops.
752 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
755 for (i = 0; i < timeo; i++) {
756 if (chip->legacy.dev_ready) {
757 if (chip->legacy.dev_ready(chip))
763 ret = nand_read_data_op(chip, &status, sizeof(status),
768 if (status & NAND_STATUS_READY)
775 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
777 return (chip->parameters.supports_set_get_features &&
778 test_bit(addr, chip->parameters.get_feature_list));
781 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
783 return (chip->parameters.supports_set_get_features &&
784 test_bit(addr, chip->parameters.set_feature_list));
788 * nand_reset_interface - Reset data interface and timings
789 * @chip: The NAND chip
790 * @chipnr: Internal die id
792 * Reset the Data interface and timings to ONFI mode 0.
794 * Returns 0 for success or negative error code otherwise.
796 static int nand_reset_interface(struct nand_chip *chip, int chipnr)
798 const struct nand_controller_ops *ops = chip->controller->ops;
801 if (!nand_controller_can_setup_interface(chip))
805 * The ONFI specification says:
807 * To transition from NV-DDR or NV-DDR2 to the SDR data
808 * interface, the host shall use the Reset (FFh) command
809 * using SDR timing mode 0. A device in any timing mode is
810 * required to recognize Reset (FFh) command issued in SDR
814 * Configure the data interface in SDR mode and set the
815 * timings to timing mode 0.
818 chip->current_interface_config = nand_get_reset_interface_config();
819 ret = ops->setup_interface(chip, chipnr,
820 chip->current_interface_config);
822 pr_err("Failed to configure data interface to SDR timing mode 0\n");
828 * nand_setup_interface - Setup the best data interface and timings
829 * @chip: The NAND chip
830 * @chipnr: Internal die id
832 * Configure what has been reported to be the best data interface and NAND
833 * timings supported by the chip and the driver.
835 * Returns 0 for success or negative error code otherwise.
837 static int nand_setup_interface(struct nand_chip *chip, int chipnr)
839 const struct nand_controller_ops *ops = chip->controller->ops;
840 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
843 if (!nand_controller_can_setup_interface(chip))
847 * A nand_reset_interface() put both the NAND chip and the NAND
848 * controller in timings mode 0. If the default mode for this chip is
849 * also 0, no need to proceed to the change again. Plus, at probe time,
850 * nand_setup_interface() uses ->set/get_features() which would
851 * fail anyway as the parameter page is not available yet.
853 if (!chip->best_interface_config)
856 request = chip->best_interface_config->timings.mode;
857 if (nand_interface_is_sdr(chip->best_interface_config))
858 request |= ONFI_DATA_INTERFACE_SDR;
860 request |= ONFI_DATA_INTERFACE_NVDDR;
861 tmode_param[0] = request;
863 /* Change the mode on the chip side (if supported by the NAND chip) */
864 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
865 nand_select_target(chip, chipnr);
866 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
868 nand_deselect_target(chip);
873 /* Change the mode on the controller side */
874 ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
878 /* Check the mode has been accepted by the chip, if supported */
879 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
880 goto update_interface_config;
882 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
883 nand_select_target(chip, chipnr);
884 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
886 nand_deselect_target(chip);
890 if (request != tmode_param[0]) {
891 pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
892 nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
893 chip->best_interface_config->timings.mode);
894 pr_debug("NAND chip would work in %s timing mode %d\n",
895 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
896 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
900 update_interface_config:
901 chip->current_interface_config = chip->best_interface_config;
907 * Fallback to mode 0 if the chip explicitly did not ack the chosen
910 nand_reset_interface(chip, chipnr);
911 nand_select_target(chip, chipnr);
913 nand_deselect_target(chip);
919 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
920 * NAND controller and the NAND chip support
921 * @chip: the NAND chip
922 * @iface: the interface configuration (can eventually be updated)
923 * @spec_timings: specific timings, when not fitting the ONFI specification
925 * If specific timings are provided, use them. Otherwise, retrieve supported
926 * timing modes from ONFI information.
928 int nand_choose_best_sdr_timings(struct nand_chip *chip,
929 struct nand_interface_config *iface,
930 struct nand_sdr_timings *spec_timings)
932 const struct nand_controller_ops *ops = chip->controller->ops;
933 int best_mode = 0, mode, ret = -EOPNOTSUPP;
935 iface->type = NAND_SDR_IFACE;
938 iface->timings.sdr = *spec_timings;
939 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
941 /* Verify the controller supports the requested interface */
942 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
945 chip->best_interface_config = iface;
949 /* Fallback to slower modes */
950 best_mode = iface->timings.mode;
951 } else if (chip->parameters.onfi) {
952 best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
955 for (mode = best_mode; mode >= 0; mode--) {
956 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
958 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
961 chip->best_interface_config = iface;
970 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
971 * NAND controller and the NAND chip support
972 * @chip: the NAND chip
973 * @iface: the interface configuration (can eventually be updated)
974 * @spec_timings: specific timings, when not fitting the ONFI specification
976 * If specific timings are provided, use them. Otherwise, retrieve supported
977 * timing modes from ONFI information.
979 int nand_choose_best_nvddr_timings(struct nand_chip *chip,
980 struct nand_interface_config *iface,
981 struct nand_nvddr_timings *spec_timings)
983 const struct nand_controller_ops *ops = chip->controller->ops;
984 int best_mode = 0, mode, ret = -EOPNOTSUPP;
986 iface->type = NAND_NVDDR_IFACE;
989 iface->timings.nvddr = *spec_timings;
990 iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);
992 /* Verify the controller supports the requested interface */
993 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
996 chip->best_interface_config = iface;
1000 /* Fallback to slower modes */
1001 best_mode = iface->timings.mode;
1002 } else if (chip->parameters.onfi) {
1003 best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
1006 for (mode = best_mode; mode >= 0; mode--) {
1007 onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);
1009 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
1012 chip->best_interface_config = iface;
1021 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
1022 * NAND controller and the NAND chip support
1023 * @chip: the NAND chip
1024 * @iface: the interface configuration (can eventually be updated)
1026 * If specific timings are provided, use them. Otherwise, retrieve supported
1027 * timing modes from ONFI information.
1029 static int nand_choose_best_timings(struct nand_chip *chip,
1030 struct nand_interface_config *iface)
1034 /* Try the fastest timings: NV-DDR */
1035 ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
1039 /* Fallback to SDR timings otherwise */
1040 return nand_choose_best_sdr_timings(chip, iface, NULL);
1044 * nand_choose_interface_config - find the best data interface and timings
1045 * @chip: The NAND chip
1047 * Find the best data interface and NAND timings supported by the chip
1048 * and the driver. Eventually let the NAND manufacturer driver propose his own
1051 * After this function nand_chip->interface_config is initialized with the best
1052 * timing mode available.
1054 * Returns 0 for success or negative error code otherwise.
1056 static int nand_choose_interface_config(struct nand_chip *chip)
1058 struct nand_interface_config *iface;
1061 if (!nand_controller_can_setup_interface(chip))
1064 iface = kzalloc(sizeof(*iface), GFP_KERNEL);
1068 if (chip->ops.choose_interface_config)
1069 ret = chip->ops.choose_interface_config(chip, iface);
1071 ret = nand_choose_best_timings(chip, iface);
1080 * nand_fill_column_cycles - fill the column cycles of an address
1081 * @chip: The NAND chip
1082 * @addrs: Array of address cycles to fill
1083 * @offset_in_page: The offset in the page
1085 * Fills the first or the first two bytes of the @addrs field depending
1086 * on the NAND bus width and the page size.
1088 * Returns the number of cycles needed to encode the column, or a negative
1089 * error code in case one of the arguments is invalid.
1091 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
1092 unsigned int offset_in_page)
1094 struct mtd_info *mtd = nand_to_mtd(chip);
1096 /* Make sure the offset is less than the actual page size. */
1097 if (offset_in_page > mtd->writesize + mtd->oobsize)
1101 * On small page NANDs, there's a dedicated command to access the OOB
1102 * area, and the column address is relative to the start of the OOB
1103 * area, not the start of the page. Asjust the address accordingly.
1105 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
1106 offset_in_page -= mtd->writesize;
1109 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
1110 * wide, then it must be divided by 2.
1112 if (chip->options & NAND_BUSWIDTH_16) {
1113 if (WARN_ON(offset_in_page % 2))
1116 offset_in_page /= 2;
1119 addrs[0] = offset_in_page;
1122 * Small page NANDs use 1 cycle for the columns, while large page NANDs
1125 if (mtd->writesize <= 512)
1128 addrs[1] = offset_in_page >> 8;
1133 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1134 unsigned int offset_in_page, void *buf,
1137 const struct nand_interface_config *conf =
1138 nand_get_interface_config(chip);
1139 struct mtd_info *mtd = nand_to_mtd(chip);
1141 struct nand_op_instr instrs[] = {
1142 NAND_OP_CMD(NAND_CMD_READ0, 0),
1143 NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
1144 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
1145 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1146 NAND_OP_DATA_IN(len, buf, 0),
1148 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1151 /* Drop the DATA_IN instruction if len is set to 0. */
1155 if (offset_in_page >= mtd->writesize)
1156 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1157 else if (offset_in_page >= 256 &&
1158 !(chip->options & NAND_BUSWIDTH_16))
1159 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1161 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1166 addrs[2] = page >> 8;
1168 if (chip->options & NAND_ROW_ADDR_3) {
1169 addrs[3] = page >> 16;
1170 instrs[1].ctx.addr.naddrs++;
1173 return nand_exec_op(chip, &op);
1176 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1177 unsigned int offset_in_page, void *buf,
1180 const struct nand_interface_config *conf =
1181 nand_get_interface_config(chip);
1183 struct nand_op_instr instrs[] = {
1184 NAND_OP_CMD(NAND_CMD_READ0, 0),
1185 NAND_OP_ADDR(4, addrs, 0),
1186 NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
1187 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
1188 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1189 NAND_OP_DATA_IN(len, buf, 0),
1191 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1194 /* Drop the DATA_IN instruction if len is set to 0. */
1198 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1203 addrs[3] = page >> 8;
1205 if (chip->options & NAND_ROW_ADDR_3) {
1206 addrs[4] = page >> 16;
1207 instrs[1].ctx.addr.naddrs++;
1210 return nand_exec_op(chip, &op);
1214 * nand_read_page_op - Do a READ PAGE operation
1215 * @chip: The NAND chip
1216 * @page: page to read
1217 * @offset_in_page: offset within the page
1218 * @buf: buffer used to store the data
1219 * @len: length of the buffer
1221 * This function issues a READ PAGE operation.
1222 * This function does not select/unselect the CS line.
1224 * Returns 0 on success, a negative error code otherwise.
1226 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1227 unsigned int offset_in_page, void *buf, unsigned int len)
1229 struct mtd_info *mtd = nand_to_mtd(chip);
1234 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1237 if (nand_has_exec_op(chip)) {
1238 if (mtd->writesize > 512)
1239 return nand_lp_exec_read_page_op(chip, page,
1240 offset_in_page, buf,
1243 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1247 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1249 chip->legacy.read_buf(chip, buf, len);
1253 EXPORT_SYMBOL_GPL(nand_read_page_op);
1256 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1257 * @chip: The NAND chip
1258 * @page: parameter page to read
1259 * @buf: buffer used to store the data
1260 * @len: length of the buffer
1262 * This function issues a READ PARAMETER PAGE operation.
1263 * This function does not select/unselect the CS line.
1265 * Returns 0 on success, a negative error code otherwise.
1267 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1276 if (nand_has_exec_op(chip)) {
1277 const struct nand_interface_config *conf =
1278 nand_get_interface_config(chip);
1279 struct nand_op_instr instrs[] = {
1280 NAND_OP_CMD(NAND_CMD_PARAM, 0),
1281 NAND_OP_ADDR(1, &page,
1282 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1283 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
1284 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1285 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1287 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1289 /* Drop the DATA_IN instruction if len is set to 0. */
1293 return nand_exec_op(chip, &op);
1296 chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1297 for (i = 0; i < len; i++)
1298 p[i] = chip->legacy.read_byte(chip);
1304 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1305 * @chip: The NAND chip
1306 * @offset_in_page: offset within the page
1307 * @buf: buffer used to store the data
1308 * @len: length of the buffer
1309 * @force_8bit: force 8-bit bus access
1311 * This function issues a CHANGE READ COLUMN operation.
1312 * This function does not select/unselect the CS line.
1314 * Returns 0 on success, a negative error code otherwise.
1316 int nand_change_read_column_op(struct nand_chip *chip,
1317 unsigned int offset_in_page, void *buf,
1318 unsigned int len, bool force_8bit)
1320 struct mtd_info *mtd = nand_to_mtd(chip);
/* Bound the transfer to the page + OOB area. */
1325 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1328 /* Small page NANDs do not support column change. */
1329 if (mtd->writesize <= 512)
1332 if (nand_has_exec_op(chip)) {
1333 const struct nand_interface_config *conf =
1334 nand_get_interface_config(chip);
/* RNDOUT + 2 column cycles + RNDOUTSTART, then the data transfer. */
1336 struct nand_op_instr instrs[] = {
1337 NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1338 NAND_OP_ADDR(2, addrs, 0),
1339 NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1340 NAND_COMMON_TIMING_NS(conf, tCCS_min)),
1341 NAND_OP_DATA_IN(len, buf, 0),
1343 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Encode the column address cycles (handles 16-bit bus halving). */
1346 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1350 /* Drop the DATA_IN instruction if len is set to 0. */
/* Propagate the caller's bus-width constraint to the data phase. */
1354 instrs[3].ctx.data.force_8bit = force_8bit;
1356 return nand_exec_op(chip, &op);
/* Legacy fallback. */
1359 chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1361 chip->legacy.read_buf(chip, buf, len);
1365 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1368 * nand_read_oob_op - Do a READ OOB operation
1369 * @chip: The NAND chip
1370 * @page: page to read
1371 * @offset_in_oob: offset within the OOB area
1372 * @buf: buffer used to store the data
1373 * @len: length of the buffer
1375 * This function issues a READ OOB operation.
1376 * This function does not select/unselect the CS line.
1378 * Returns 0 on success, a negative error code otherwise.
1380 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1381 unsigned int offset_in_oob, void *buf, unsigned int len)
1383 struct mtd_info *mtd = nand_to_mtd(chip);
/* The OOB offset is bounded by the OOB size alone. */
1388 if (offset_in_oob + len > mtd->oobsize)
/*
 * exec_op path: an OOB read is a page read whose column starts
 * right after the data area (writesize + offset_in_oob).
 */
1391 if (nand_has_exec_op(chip))
1392 return nand_read_page_op(chip, page,
1393 mtd->writesize + offset_in_oob,
/* Legacy controllers have a dedicated READOOB command instead. */
1396 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1398 chip->legacy.read_buf(chip, buf, len);
1402 EXPORT_SYMBOL_GPL(nand_read_oob_op);
/*
 * Core PROG PAGE helper for ->exec_op() controllers: builds the
 * (READ0) + SEQIN + address + DATA_OUT (+ PAGEPROG/wait) sequence.
 * When @prog is false only the first half is sent; the caller is
 * expected to finish the operation (see nand_prog_page_end_op()).
 */
1404 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1405 unsigned int offset_in_page, const void *buf,
1406 unsigned int len, bool prog)
1408 const struct nand_interface_config *conf =
1409 nand_get_interface_config(chip);
1410 struct mtd_info *mtd = nand_to_mtd(chip);
1412 struct nand_op_instr instrs[] = {
1414 * The first instruction will be dropped if we're dealing
1415 * with a large page NAND and adjusted if we're dealing
1416 * with a small page NAND and the page offset is > 255.
1418 NAND_OP_CMD(NAND_CMD_READ0, 0),
1419 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1420 NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
1421 NAND_OP_DATA_OUT(len, buf, 0),
1422 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1423 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1424 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
1426 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Column cycles first, then 2 or 3 row (page) address cycles. */
1427 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1432 addrs[naddrs++] = page;
1433 addrs[naddrs++] = page >> 8;
1434 if (chip->options & NAND_ROW_ADDR_3)
1435 addrs[naddrs++] = page >> 16;
/* Patch the real cycle count into the ADDR instruction. */
1437 instrs[2].ctx.addr.naddrs = naddrs;
1439 /* Drop the last two instructions if we're not programming the page. */
1442 /* Also drop the DATA_OUT instruction if empty. */
1447 if (mtd->writesize <= 512) {
1449 * Small pages need some more tweaking: we have to adjust the
1450 * first instruction depending on the page offset we're trying
1453 if (offset_in_page >= mtd->writesize)
1454 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1455 else if (offset_in_page >= 256 &&
1456 !(chip->options & NAND_BUSWIDTH_16))
1457 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1460 * Drop the first command if we're dealing with a large page
1467 return nand_exec_op(chip, &op);
1471 * nand_prog_page_begin_op - starts a PROG PAGE operation
1472 * @chip: The NAND chip
1473 * @page: page to write
1474 * @offset_in_page: offset within the page
1475 * @buf: buffer containing the data to write to the page
1476 * @len: length of the buffer
1478 * This function issues the first half of a PROG PAGE operation.
1479 * This function does not select/unselect the CS line.
1481 * Returns 0 on success, a negative error code otherwise.
1483 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1484 unsigned int offset_in_page, const void *buf,
1487 struct mtd_info *mtd = nand_to_mtd(chip);
/* Bound the write to the page + OOB area. */
1492 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
/*
 * exec_op path: only the first half is issued here; the PAGEPROG
 * is deferred to nand_prog_page_end_op().
 */
1495 if (nand_has_exec_op(chip))
1496 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
/* Legacy path: SEQIN + optional data, no PAGEPROG yet. */
1499 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1502 chip->legacy.write_buf(chip, buf, len);
1506 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1509 * nand_prog_page_end_op - ends a PROG PAGE operation
1510 * @chip: The NAND chip
1512 * This function issues the second half of a PROG PAGE operation.
1513 * This function does not select/unselect the CS line.
1515 * Returns 0 on success, a negative error code otherwise.
1517 int nand_prog_page_end_op(struct nand_chip *chip)
1522 if (nand_has_exec_op(chip)) {
1523 const struct nand_interface_config *conf =
1524 nand_get_interface_config(chip);
/* PAGEPROG followed by a ready/busy wait bounded by tPROG_max. */
1525 struct nand_op_instr instrs[] = {
1526 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1527 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1528 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
1531 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1533 ret = nand_exec_op(chip, &op);
/* Read back the status register to detect a program failure. */
1537 ret = nand_status_op(chip, &status);
/* Legacy path: PAGEPROG then waitfunc() returns the status byte. */
1541 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1542 ret = chip->legacy.waitfunc(chip);
/* The chip reports a failed program through NAND_STATUS_FAIL. */
1549 if (status & NAND_STATUS_FAIL)
1554 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1557 * nand_prog_page_op - Do a full PROG PAGE operation
1558 * @chip: The NAND chip
1559 * @page: page to write
1560 * @offset_in_page: offset within the page
1561 * @buf: buffer containing the data to write to the page
1562 * @len: length of the buffer
1564 * This function issues a full PROG PAGE operation.
1565 * This function does not select/unselect the CS line.
1567 * Returns 0 on success, a negative error code otherwise.
1569 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1570 unsigned int offset_in_page, const void *buf,
1573 struct mtd_info *mtd = nand_to_mtd(chip);
/* Bound the write to the page + OOB area. */
1580 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
/* exec_op path: full sequence including PAGEPROG, then read status. */
1583 if (nand_has_exec_op(chip)) {
1584 ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1589 ret = nand_status_op(chip, &status);
/* Legacy path: SEQIN + data + PAGEPROG, status from waitfunc(). */
1593 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1595 chip->legacy.write_buf(chip, buf, len);
1596 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1597 ret = chip->legacy.waitfunc(chip);
/* Report -EIO (or similar) when the chip flags a program failure. */
1604 if (status & NAND_STATUS_FAIL)
1609 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1612 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1613 * @chip: The NAND chip
1614 * @offset_in_page: offset within the page
1615 * @buf: buffer containing the data to send to the NAND
1616 * @len: length of the buffer
1617 * @force_8bit: force 8-bit bus access
1619 * This function issues a CHANGE WRITE COLUMN operation.
1620 * This function does not select/unselect the CS line.
1622 * Returns 0 on success, a negative error code otherwise.
1624 int nand_change_write_column_op(struct nand_chip *chip,
1625 unsigned int offset_in_page,
1626 const void *buf, unsigned int len,
1629 struct mtd_info *mtd = nand_to_mtd(chip);
/* Bound the transfer to the page + OOB area. */
1634 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1637 /* Small page NANDs do not support column change. */
1638 if (mtd->writesize <= 512)
1641 if (nand_has_exec_op(chip)) {
1642 const struct nand_interface_config *conf =
1643 nand_get_interface_config(chip);
/* RNDIN + 2 column cycles, then the outgoing data. */
1645 struct nand_op_instr instrs[] = {
1646 NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1647 NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
1648 NAND_OP_DATA_OUT(len, buf, 0),
1650 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Encode the column address cycles. */
1653 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1657 instrs[2].ctx.data.force_8bit = force_8bit;
1659 /* Drop the DATA_OUT instruction if len is set to 0. */
1663 return nand_exec_op(chip, &op);
/* Legacy fallback. */
1666 chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1668 chip->legacy.write_buf(chip, buf, len);
1672 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1675 * nand_readid_op - Do a READID operation
1676 * @chip: The NAND chip
1677 * @addr: address cycle to pass after the READID command
1678 * @buf: buffer used to store the ID
1679 * @len: length of the buffer
1681 * This function sends a READID command and reads back the ID returned by the
1683 * This function does not select/unselect the CS line.
1685 * Returns 0 on success, a negative error code otherwise.
1687 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1691 u8 *id = buf, *ddrbuf = NULL;
1696 if (nand_has_exec_op(chip)) {
1697 const struct nand_interface_config *conf =
1698 nand_get_interface_config(chip);
/* READID + one address cycle + 8-bit data-in of the ID bytes. */
1699 struct nand_op_instr instrs[] = {
1700 NAND_OP_CMD(NAND_CMD_READID, 0),
1701 NAND_OP_ADDR(1, &addr,
1702 NAND_COMMON_TIMING_NS(conf, tADL_min)),
1703 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1705 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1708 /* READ_ID data bytes are received twice in NV-DDR mode */
1709 if (len && nand_interface_is_nvddr(conf)) {
/*
 * Temporary buffer for the doubled NV-DDR stream; only even
 * bytes are copied back below.
 * NOTE(review): the kfree() of ddrbuf is not visible in this
 * excerpt — confirm it is released on every path.
 */
1710 ddrbuf = kzalloc(len * 2, GFP_KERNEL);
1714 instrs[2].ctx.data.len *= 2;
1715 instrs[2].ctx.data.buf.in = ddrbuf;
1718 /* Drop the DATA_IN instruction if len is set to 0. */
1722 ret = nand_exec_op(chip, &op);
1723 if (!ret && len && nand_interface_is_nvddr(conf)) {
/* De-duplicate: keep every other byte of the NV-DDR stream. */
1724 for (i = 0; i < len; i++)
1725 id[i] = ddrbuf[i * 2];
/* Legacy path: READID then byte-wise reads. */
1733 chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1735 for (i = 0; i < len; i++)
1736 id[i] = chip->legacy.read_byte(chip);
1740 EXPORT_SYMBOL_GPL(nand_readid_op);
1743 * nand_status_op - Do a STATUS operation
1744 * @chip: The NAND chip
1745 * @status: out variable to store the NAND status
1747 * This function sends a STATUS command and reads back the status returned by
1749 * This function does not select/unselect the CS line.
1751 * Returns 0 on success, a negative error code otherwise.
1753 int nand_status_op(struct nand_chip *chip, u8 *status)
1755 if (nand_has_exec_op(chip)) {
1756 const struct nand_interface_config *conf =
1757 nand_get_interface_config(chip);
/* STATUS command followed by a single 8-bit status byte read. */
1759 struct nand_op_instr instrs[] = {
1760 NAND_OP_CMD(NAND_CMD_STATUS,
1761 NAND_COMMON_TIMING_NS(conf, tADL_min)),
1762 NAND_OP_8BIT_DATA_IN(1, status, 0),
1764 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1767 /* The status data byte will be received twice in NV-DDR mode */
1768 if (status && nand_interface_is_nvddr(conf)) {
/* Redirect into the stack-local doubled buffer (ddrstatus). */
1769 instrs[1].ctx.data.len *= 2;
1770 instrs[1].ctx.data.buf.in = ddrstatus;
1776 ret = nand_exec_op(chip, &op);
/* Only the first of the two NV-DDR bytes is meaningful. */
1777 if (!ret && status && nand_interface_is_nvddr(conf))
1778 *status = ddrstatus[0];
/* Legacy path. */
1783 chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1785 *status = chip->legacy.read_byte(chip);
1789 EXPORT_SYMBOL_GPL(nand_status_op);
1792 * nand_exit_status_op - Exit a STATUS operation
1793 * @chip: The NAND chip
1795 * This function sends a READ0 command to cancel the effect of the STATUS
1796 * command to avoid reading only the status until a new read command is sent.
1798 * This function does not select/unselect the CS line.
1800 * Returns 0 on success, a negative error code otherwise.
1802 int nand_exit_status_op(struct nand_chip *chip)
1804 if (nand_has_exec_op(chip)) {
/* A bare READ0 command is enough to leave status-read mode. */
1805 struct nand_op_instr instrs[] = {
1806 NAND_OP_CMD(NAND_CMD_READ0, 0),
1808 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1810 return nand_exec_op(chip, &op);
/* Legacy equivalent. */
1813 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1819 * nand_erase_op - Do an erase operation
1820 * @chip: The NAND chip
1821 * @eraseblock: block to erase
1823 * This function sends an ERASE command and waits for the NAND to be ready
1825 * This function does not select/unselect the CS line.
1827 * Returns 0 on success, a negative error code otherwise.
1829 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
/* Convert the eraseblock index into its first page (row) address. */
1831 unsigned int page = eraseblock <<
1832 (chip->phys_erase_shift - chip->page_shift);
1836 if (nand_has_exec_op(chip)) {
1837 const struct nand_interface_config *conf =
1838 nand_get_interface_config(chip);
/* Row address cycles only: erase addresses whole blocks. */
1839 u8 addrs[3] = { page, page >> 8, page >> 16 };
1840 struct nand_op_instr instrs[] = {
1841 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1842 NAND_OP_ADDR(2, addrs, 0),
1843 NAND_OP_CMD(NAND_CMD_ERASE2,
1844 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1845 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
1848 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Chips with 3 row-address cycles need one extra cycle. */
1850 if (chip->options & NAND_ROW_ADDR_3)
1851 instrs[1].ctx.addr.naddrs++;
1853 ret = nand_exec_op(chip, &op);
/* Fetch the status register to detect an erase failure. */
1857 ret = nand_status_op(chip, &status);
/* Legacy path: ERASE1/ERASE2 then status via waitfunc(). */
1861 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1862 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1864 ret = chip->legacy.waitfunc(chip);
1871 if (status & NAND_STATUS_FAIL)
1876 EXPORT_SYMBOL_GPL(nand_erase_op);
1879 * nand_set_features_op - Do a SET FEATURES operation
1880 * @chip: The NAND chip
1881 * @feature: feature id
1882 * @data: 4 bytes of data
1884 * This function sends a SET FEATURES command and waits for the NAND to be
1885 * ready before returning.
1886 * This function does not select/unselect the CS line.
1888 * Returns 0 on success, a negative error code otherwise.
1890 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1893 const u8 *params = data;
1896 if (nand_has_exec_op(chip)) {
1897 const struct nand_interface_config *conf =
1898 nand_get_interface_config(chip);
/* SET_FEATURES + feature address + 4 parameter bytes + wait. */
1899 struct nand_op_instr instrs[] = {
1900 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1901 NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
1903 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1904 NAND_COMMON_TIMING_NS(conf,
1906 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1909 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1911 return nand_exec_op(chip, &op);
/* Legacy path: byte-wise writes then wait for completion. */
1914 chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1915 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1916 chip->legacy.write_byte(chip, params[i]);
1918 ret = chip->legacy.waitfunc(chip);
/* waitfunc() returns the status byte; check the fail bit. */
1922 if (ret & NAND_STATUS_FAIL)
1929 * nand_get_features_op - Do a GET FEATURES operation
1930 * @chip: The NAND chip
1931 * @feature: feature id
1932 * @data: 4 bytes of data
1934 * This function sends a GET FEATURES command and waits for the NAND to be
1935 * ready before returning.
1936 * This function does not select/unselect the CS line.
1938 * Returns 0 on success, a negative error code otherwise.
1940 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
/* ddrbuf: stack buffer sized for the doubled NV-DDR data stream. */
1943 u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
1946 if (nand_has_exec_op(chip)) {
1947 const struct nand_interface_config *conf =
1948 nand_get_interface_config(chip);
/* GET_FEATURES + feature address + wait + 4 parameter bytes in. */
1949 struct nand_op_instr instrs[] = {
1950 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1951 NAND_OP_ADDR(1, &feature,
1952 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1953 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1954 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1955 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1958 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1961 /* GET_FEATURE data bytes are received twice in NV-DDR mode */
1962 if (nand_interface_is_nvddr(conf)) {
1963 instrs[3].ctx.data.len *= 2;
1964 instrs[3].ctx.data.buf.in = ddrbuf;
1967 ret = nand_exec_op(chip, &op);
/* De-duplicate the NV-DDR stream: keep every other byte. */
1968 if (nand_interface_is_nvddr(conf)) {
1969 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
1970 params[i] = ddrbuf[i * 2];
/* Legacy path: byte-wise reads. */
1976 chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1977 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1978 params[i] = chip->legacy.read_byte(chip);
/*
 * Wait for the chip to become ready, either through a WAIT_RDY
 * instruction (exec_op controllers) or by polling/delaying on the
 * legacy path.
 */
1983 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1984 unsigned int delay_ns)
1986 if (nand_has_exec_op(chip)) {
1987 struct nand_op_instr instrs[] = {
/*
 * NOTE(review): PSEC_TO_MSEC/PSEC_TO_NSEC applied to parameters
 * already named in ms/ns — confirm the units of the callers.
 */
1988 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1989 PSEC_TO_NSEC(delay_ns)),
1991 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1993 return nand_exec_op(chip, &op);
1996 /* Apply delay or wait for ready/busy pin */
1997 if (!chip->legacy.dev_ready)
1998 udelay(chip->legacy.chip_delay);
2000 nand_wait_ready(chip);
2006 * nand_reset_op - Do a reset operation
2007 * @chip: The NAND chip
2009 * This function sends a RESET command and waits for the NAND to be ready
2011 * This function does not select/unselect the CS line.
2013 * Returns 0 on success, a negative error code otherwise.
2015 int nand_reset_op(struct nand_chip *chip)
2017 if (nand_has_exec_op(chip)) {
2018 const struct nand_interface_config *conf =
2019 nand_get_interface_config(chip);
/* RESET command, then wait up to tRST_max for ready. */
2020 struct nand_op_instr instrs[] = {
2021 NAND_OP_CMD(NAND_CMD_RESET,
2022 NAND_COMMON_TIMING_NS(conf, tWB_max)),
2023 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
2026 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2028 return nand_exec_op(chip, &op);
/* Legacy equivalent. */
2031 chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
2035 EXPORT_SYMBOL_GPL(nand_reset_op);
2038 * nand_read_data_op - Read data from the NAND
2039 * @chip: The NAND chip
2040 * @buf: buffer used to store the data
2041 * @len: length of the buffer
2042 * @force_8bit: force 8-bit bus access
2043 * @check_only: do not actually run the command, only checks if the
2044 * controller driver supports it
2046 * This function does a raw data read on the bus. Usually used after launching
2047 * another NAND operation like nand_read_page_op().
2048 * This function does not select/unselect the CS line.
2050 * Returns 0 on success, a negative error code otherwise.
2052 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2053 bool force_8bit, bool check_only)
2058 if (nand_has_exec_op(chip)) {
2059 const struct nand_interface_config *conf =
2060 nand_get_interface_config(chip);
/* A single DATA_IN instruction covers the whole transfer. */
2061 struct nand_op_instr instrs[] = {
2062 NAND_OP_DATA_IN(len, buf, 0),
2064 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2068 instrs[0].ctx.data.force_8bit = force_8bit;
2071 * Parameter payloads (ID, status, features, etc) do not go
2072 * through the same pipeline as regular data, hence the
2073 * force_8bit flag must be set and this also indicates that in
2074 * case NV-DDR timings are being used the data will be received
2077 if (force_8bit && nand_interface_is_nvddr(conf)) {
/*
 * Doubled temporary buffer for the NV-DDR stream.
 * NOTE(review): kfree() of ddrbuf is not visible in this
 * excerpt — confirm all exit paths release it.
 */
2078 ddrbuf = kzalloc(len * 2, GFP_KERNEL);
2082 instrs[0].ctx.data.len *= 2;
2083 instrs[0].ctx.data.buf.in = ddrbuf;
/* check_only: validate controller support without executing. */
2087 ret = nand_check_op(chip, &op);
2092 ret = nand_exec_op(chip, &op);
/* De-duplicate the NV-DDR stream into the caller's buffer. */
2093 if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
2096 for (i = 0; i < len; i++)
2097 dst[i] = ddrbuf[i * 2];
/* Legacy path: byte-wise or bulk read depending on force_8bit. */
2112 for (i = 0; i < len; i++)
2113 p[i] = chip->legacy.read_byte(chip);
2115 chip->legacy.read_buf(chip, buf, len);
2120 EXPORT_SYMBOL_GPL(nand_read_data_op);
2123 * nand_write_data_op - Write data from the NAND
2124 * @chip: The NAND chip
2125 * @buf: buffer containing the data to send on the bus
2126 * @len: length of the buffer
2127 * @force_8bit: force 8-bit bus access
2129 * This function does a raw data write on the bus. Usually used after launching
2130 * another NAND operation like nand_write_page_begin_op().
2131 * This function does not select/unselect the CS line.
2133 * Returns 0 on success, a negative error code otherwise.
2135 int nand_write_data_op(struct nand_chip *chip, const void *buf,
2136 unsigned int len, bool force_8bit)
2141 if (nand_has_exec_op(chip)) {
/* A single DATA_OUT instruction covers the whole transfer. */
2142 struct nand_op_instr instrs[] = {
2143 NAND_OP_DATA_OUT(len, buf, 0),
2145 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2147 instrs[0].ctx.data.force_8bit = force_8bit;
2149 return nand_exec_op(chip, &op);
/* Legacy path: byte-wise or bulk write depending on force_8bit. */
2156 for (i = 0; i < len; i++)
2157 chip->legacy.write_byte(chip, p[i]);
2159 chip->legacy.write_buf(chip, buf, len);
2164 EXPORT_SYMBOL_GPL(nand_write_data_op);
2167 * struct nand_op_parser_ctx - Context used by the parser
2168 * @instrs: array of all the instructions that must be addressed
2169 * @ninstrs: length of the @instrs array
2170 * @subop: Sub-operation to be passed to the NAND controller
2172 * This structure is used by the core to split NAND operations into
2173 * sub-operations that can be handled by the NAND controller.
2175 struct nand_op_parser_ctx {
2176 const struct nand_op_instr *instrs;
2177 unsigned int ninstrs;
/* Mutable window over @instrs describing the current sub-operation. */
2178 struct nand_subop subop;
2182 * nand_op_parser_must_split_instr - Checks if an instruction must be split
2183 * @pat: the parser pattern element that matches @instr
2184 * @instr: pointer to the instruction to check
2185 * @start_offset: this is an in/out parameter. If @instr has already been
2186 * split, then @start_offset is the offset from which to start
2187 * (either an address cycle or an offset in the data buffer).
2188 * Conversely, if the function returns true (ie. instr must be
2189 * split), this parameter is updated to point to the first
2190 * data/address cycle that has not been taken care of.
2192 * Some NAND controllers are limited and cannot send X address cycles with a
2193 * unique operation, or cannot read/write more than Y bytes at the same time.
2194 * In this case, split the instruction that does not fit in a single
2195 * controller-operation into two or more chunks.
2197 * Returns true if the instruction must be split, false otherwise.
2198 * The @start_offset parameter is also updated to the offset at which the next
2199 * bundle of instruction must start (if an address or a data instruction).
2202 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2203 const struct nand_op_instr *instr,
2204 unsigned int *start_offset)
2206 switch (pat->type) {
2207 case NAND_OP_ADDR_INSTR:
/* maxcycles == 0 means "no limit": never split. */
2208 if (!pat->ctx.addr.maxcycles)
/* Split when the remaining cycles exceed the pattern's limit. */
2211 if (instr->ctx.addr.naddrs - *start_offset >
2212 pat->ctx.addr.maxcycles) {
2213 *start_offset += pat->ctx.addr.maxcycles;
2218 case NAND_OP_DATA_IN_INSTR:
2219 case NAND_OP_DATA_OUT_INSTR:
/* maxlen == 0 means "no limit": never split. */
2220 if (!pat->ctx.data.maxlen)
/* Split when the remaining bytes exceed the pattern's limit. */
2223 if (instr->ctx.data.len - *start_offset >
2224 pat->ctx.data.maxlen) {
2225 *start_offset += pat->ctx.data.maxlen;
2238 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2239 * remaining in the parser context
2240 * @pat: the pattern to test
2241 * @ctx: the parser context structure to match with the pattern @pat
2243 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2244 * Returns true if this is the case, false ortherwise. When true is returned,
2245 * @ctx->subop is updated with the set of instructions to be passed to the
2246 * controller driver.
2249 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2250 struct nand_op_parser_ctx *ctx)
2252 unsigned int instr_offset = ctx->subop.first_instr_start_off;
2253 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2254 const struct nand_op_instr *instr = ctx->subop.instrs;
2255 unsigned int i, ninstrs;
/* Walk pattern elements and instructions in lockstep. */
2257 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2259 * The pattern instruction does not match the operation
2260 * instruction. If the instruction is marked optional in the
2261 * pattern definition, we skip the pattern element and continue
2262 * to the next one. If the element is mandatory, there's no
2263 * match and we can return false directly.
2265 if (instr->type != pat->elems[i].type) {
2266 if (!pat->elems[i].optional)
2273 * Now check the pattern element constraints. If the pattern is
2274 * not able to handle the whole instruction in a single step,
2275 * we have to split it.
2276 * The last_instr_end_off value comes back updated to point to
2277 * the position where we have to split the instruction (the
2278 * start of the next subop chunk).
2280 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2293 * This can happen if all instructions of a pattern are optional.
2294 * Still, if there's not at least one instruction handled by this
2295 * pattern, this is not a match, and we should try the next one (if
2302 * We had a match on the pattern head, but the pattern may be longer
2303 * than the instructions we're asked to execute. We need to make sure
2304 * there's no mandatory elements in the pattern tail.
2306 for (; i < pat->nelems; i++) {
2307 if (!pat->elems[i].optional)
2312 * We have a match: update the subop structure accordingly and return
/* Publish the match: instruction count and split offset. */
2315 ctx->subop.ninstrs = ninstrs;
2316 ctx->subop.last_instr_end_off = instr_offset;
2321 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Debug-only: dump the current sub-operation to the kernel log. */
2322 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2324 const struct nand_op_instr *instr;
2328 pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
2330 for (i = 0; i < ctx->ninstrs; i++) {
2331 instr = &ctx->instrs[i];
/* Mark the instructions that belong to the current subop window. */
2333 if (instr == &ctx->subop.instrs[0])
2336 nand_op_trace(prefix, instr);
2338 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
/* Non-debug builds: tracing compiles away to an empty stub. */
2343 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
/*
 * Order two candidate parser contexts so the "best" match can be kept:
 * first by the number of instructions the sub-operation consumes, then
 * by how far into the last instruction it reaches.
 * (The return statements are elided in this excerpt.)
 */
2349 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2350 const struct nand_op_parser_ctx *b)
2352 if (a->subop.ninstrs < b->subop.ninstrs)
2354 else if (a->subop.ninstrs > b->subop.ninstrs)
2357 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2359 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2366 * nand_op_parser_exec_op - exec_op parser
2367 * @chip: the NAND chip
2368 * @parser: patterns description provided by the controller driver
2369 * @op: the NAND operation to address
2370 * @check_only: when true, the function only checks if @op can be handled but
2371 * does not execute the operation
2373 * Helper function designed to ease integration of NAND controller drivers that
2374 * only support a limited set of instruction sequences. The supported sequences
2375 * are described in @parser, and the framework takes care of splitting @op into
2376 * multiple sub-operations (if required) and pass them back to the ->exec()
2377 * callback of the matching pattern if @check_only is set to false.
2379 * NAND controller drivers should call this function from their own ->exec_op()
2382 * Returns 0 on success, a negative error code otherwise. A failure can be
2383 * caused by an unsupported operation (none of the supported patterns is able
2384 * to handle the requested operation), or an error returned by one of the
2385 * matching pattern->exec() hook.
2387 int nand_op_parser_exec_op(struct nand_chip *chip,
2388 const struct nand_op_parser *parser,
2389 const struct nand_operation *op, bool check_only)
2391 struct nand_op_parser_ctx ctx = {
2392 .subop.cs = op->cs,
2393 .subop.instrs = op->instrs,
2394 .instrs = op->instrs,
2395 .ninstrs = op->ninstrs,
/* Consume the operation one sub-operation at a time. */
2399 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2400 const struct nand_op_parser_pattern *pattern;
2401 struct nand_op_parser_ctx best_ctx;
2402 int ret, best_pattern = -1;
/* Try every pattern; keep the one that consumes the most. */
2404 for (i = 0; i < parser->npatterns; i++) {
2405 struct nand_op_parser_ctx test_ctx = ctx;
2407 pattern = &parser->patterns[i];
2408 if (!nand_op_parser_match_pat(pattern, &test_ctx))
2411 if (best_pattern >= 0 &&
2412 nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
2416 best_ctx = test_ctx;
/* No pattern can handle the next instruction: unsupported op. */
2419 if (best_pattern < 0) {
2420 pr_debug("->exec_op() parser: pattern not found!\n")
2425 nand_op_parser_trace(&ctx);
/* Execute the matched sub-operation unless only checking. */
2428 pattern = &parser->patterns[best_pattern];
2429 ret = pattern->exec(chip, &ctx.subop);
2435 * Update the context structure by pointing to the start of the
/*
 * When the last instruction was split mid-way, revisit it as the
 * first instruction of the next sub-operation.
 */
2438 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2439 if (ctx.subop.last_instr_end_off)
2440 ctx.subop.instrs -= 1;
2442 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2447 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2449 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2451 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2452 instr->type == NAND_OP_DATA_OUT_INSTR);
2455 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2456 unsigned int instr_idx)
2458 return subop && instr_idx < subop->ninstrs;
/*
 * Offset at which instruction @instr_idx of @subop starts. Only the
 * first instruction can carry a non-zero start offset (from a split);
 * the early-return guard for instr_idx != 0 is elided in this excerpt.
 */
2461 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2462 unsigned int instr_idx)
2467 return subop->first_instr_start_off;
2471 * nand_subop_get_addr_start_off - Get the start offset in an address array
2472 * @subop: The entire sub-operation
2473 * @instr_idx: Index of the instruction inside the sub-operation
2475 * During driver development, one could be tempted to directly use the
2476 * ->addr.addrs field of address instructions. This is wrong as address
2477 * instructions might be split.
2479 * Given an address instruction, returns the offset of the first cycle to issue.
2481 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2482 unsigned int instr_idx)
/* Only address instructions may be queried through this helper. */
2484 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2485 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2488 return nand_subop_get_start_off(subop, instr_idx);
2490 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2493 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2494 * @subop: The entire sub-operation
2495 * @instr_idx: Index of the instruction inside the sub-operation
2497 * During driver development, one could be tempted to directly use the
2498 * ->addr->naddrs field of a data instruction. This is wrong as instructions
2501 * Given an address instruction, returns the number of address cycle to issue.
2503 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2504 unsigned int instr_idx)
2506 int start_off, end_off;
2508 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2509 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2512 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
/* The last instruction may be truncated by a split. */
2514 if (instr_idx == subop->ninstrs - 1 &&
2515 subop->last_instr_end_off)
2516 end_off = subop->last_instr_end_off;
2518 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2520 return end_off - start_off;
2522 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2525 * nand_subop_get_data_start_off - Get the start offset in a data array
2526 * @subop: The entire sub-operation
2527 * @instr_idx: Index of the instruction inside the sub-operation
2529 * During driver development, one could be tempted to directly use the
2530 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2531 * instructions might be split.
2533 * Given a data instruction, returns the offset to start from.
2535 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2536 unsigned int instr_idx)
/* Only data (in/out) instructions may be queried through this helper. */
2538 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2539 !nand_instr_is_data(&subop->instrs[instr_idx])))
2542 return nand_subop_get_start_off(subop, instr_idx);
2544 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2547 * nand_subop_get_data_len - Get the number of bytes to retrieve
2548 * @subop: The entire sub-operation
2549 * @instr_idx: Index of the instruction inside the sub-operation
2551 * During driver development, one could be tempted to directly use the
2552 * ->data->len field of a data instruction. This is wrong as data instructions
2555 * Returns the length of the chunk of data to send/receive.
2557 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2558 unsigned int instr_idx)
2560 int start_off = 0, end_off;
2562 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2563 !nand_instr_is_data(&subop->instrs[instr_idx])))
2566 start_off = nand_subop_get_data_start_off(subop, instr_idx);
/* The last instruction may be truncated by a split. */
2568 if (instr_idx == subop->ninstrs - 1 &&
2569 subop->last_instr_end_off)
2570 end_off = subop->last_instr_end_off;
2572 end_off = subop->instrs[instr_idx].ctx.data.len;
2574 return end_off - start_off;
2576 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2579 * nand_reset - Reset and initialize a NAND device
2580 * @chip: The NAND chip
2581 * @chipnr: Internal die id
2583 * Save the timings data structure, then apply SDR timings mode 0 (see
2584 * nand_reset_interface for details), do the reset operation, and apply
2585 * back the previous timings.
2587 * Returns 0 on success, a negative error code otherwise.
2589 int nand_reset(struct nand_chip *chip, int chipnr)
/* Drop back to the safest (mode 0) interface before issuing RESET. */
2593 ret = nand_reset_interface(chip, chipnr);
2598 * The CS line has to be released before we can apply the new NAND
2599 * interface settings, hence this weird nand_select_target()
2600 * nand_deselect_target() dance.
2602 nand_select_target(chip, chipnr);
2603 ret = nand_reset_op(chip);
2604 nand_deselect_target(chip);
/* Restore the previously negotiated (faster) interface settings. */
2608 ret = nand_setup_interface(chip, chipnr);
2614 EXPORT_SYMBOL_GPL(nand_reset);
2617 * nand_get_features - wrapper to perform a GET_FEATURE
2618 * @chip: NAND chip info structure
2619 * @addr: feature address
2620 * @subfeature_param: the subfeature parameters, a four bytes array
2622 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2623 * operation cannot be handled.
2625 int nand_get_features(struct nand_chip *chip, int addr,
2626 u8 *subfeature_param)
/* Bail out early when the chip does not advertise this feature address. */
2628 if (!nand_supports_get_features(chip, addr))
/* Legacy controllers provide their own GET_FEATURES hook; prefer it. */
2631 if (chip->legacy.get_features)
2632 return chip->legacy.get_features(chip, addr, subfeature_param);
2634 return nand_get_features_op(chip, addr, subfeature_param);
2638 * nand_set_features - wrapper to perform a SET_FEATURE
2639 * @chip: NAND chip info structure
2640 * @addr: feature address
2641 * @subfeature_param: the subfeature parameters, a four bytes array
2643 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2644 * operation cannot be handled.
2646 int nand_set_features(struct nand_chip *chip, int addr,
2647 u8 *subfeature_param)
/* Bail out early when the chip does not advertise this feature address. */
2649 if (!nand_supports_set_features(chip, addr))
/* Legacy controllers provide their own SET_FEATURES hook; prefer it. */
2652 if (chip->legacy.set_features)
2653 return chip->legacy.set_features(chip, addr, subfeature_param);
2655 return nand_set_features_op(chip, addr, subfeature_param);
2659 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2660 * @buf: buffer to test
2661 * @len: buffer length
2662 * @bitflips_threshold: maximum number of bitflips
2664 * Check if a buffer contains only 0xff, which means the underlying region
2665 * has been erased and is ready to be programmed.
2666 * The bitflips_threshold specify the maximum number of bitflips before
2667 * considering the region is not erased.
2668 * Note: The logic of this function has been extracted from the memweight
2669 * implementation, except that nand_check_erased_buf function exit before
2670 * testing the whole buffer if the number of bitflips exceed the
2671 * bitflips_threshold value.
2673 * Returns a positive number of bitflips less than or equal to
2674 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2677 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2679 const unsigned char *bitmap = buf;
/* Head: walk byte-by-byte until the pointer is long-aligned. */
2683 for (; len && ((uintptr_t)bitmap) % sizeof(long);
/* Each zero bit in an erased (0xff) byte is a bitflip. */
2685 weight = hweight8(*bitmap);
2686 bitflips += BITS_PER_BYTE - weight;
2687 if (unlikely(bitflips > bitflips_threshold))
/* Middle: process one machine word at a time for speed. */
2691 for (; len >= sizeof(long);
2692 len -= sizeof(long), bitmap += sizeof(long)) {
2693 unsigned long d = *((unsigned long *)bitmap);
2696 weight = hweight_long(d);
2697 bitflips += BITS_PER_LONG - weight;
2698 if (unlikely(bitflips > bitflips_threshold))
/* Tail: remaining bytes that did not fill a whole long. */
2702 for (; len > 0; len--, bitmap++) {
2703 weight = hweight8(*bitmap);
2704 bitflips += BITS_PER_BYTE - weight;
2705 if (unlikely(bitflips > bitflips_threshold))
2713 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2715 * @data: data buffer to test
2716 * @datalen: data length
2718 * @ecclen: ECC length
2719 * @extraoob: extra OOB buffer
2720 * @extraooblen: extra OOB length
2721 * @bitflips_threshold: maximum number of bitflips
2723 * Check if a data buffer and its associated ECC and OOB data contains only
2724 * 0xff pattern, which means the underlying region has been erased and is
2725 * ready to be programmed.
2726 * The bitflips_threshold specify the maximum number of bitflips before
2727 * considering the region as not erased.
2730 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2731 * different from the NAND page size. When fixing bitflips, ECC engines will
2732 * report the number of errors per chunk, and the NAND core infrastructure
2733 * expect you to return the maximum number of bitflips for the whole page.
2734 * This is why you should always use this function on a single chunk and
2735 * not on the whole page. After checking each chunk you should update your
2736 * max_bitflips value accordingly.
2737 * 2/ When checking for bitflips in erased pages you should not only check
2738 * the payload data but also their associated ECC data, because a user might
2739 * have programmed almost all bits to 1 but a few. In this case, we
2740 * shouldn't consider the chunk as erased, and checking ECC bytes prevent
2742 * 3/ The extraoob argument is optional, and should be used if some of your OOB
2743 * data are protected by the ECC engine.
2744 * It could also be used if you support subpages and want to attach some
2745 * extra OOB data to an ECC chunk.
2747 * Returns a positive number of bitflips less than or equal to
2748 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2749 * threshold. In case of success, the passed buffers are filled with 0xff.
2751 int nand_check_erased_ecc_chunk(void *data, int datalen,
2752 void *ecc, int ecclen,
2753 void *extraoob, int extraooblen,
2754 int bitflips_threshold)
2756 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
/* Count flips in the payload first; a negative return means "not erased". */
2758 data_bitflips = nand_check_erased_buf(data, datalen,
2759 bitflips_threshold);
2760 if (data_bitflips < 0)
2761 return data_bitflips;
/* The threshold is a budget shared by all three regions: shrink it. */
2763 bitflips_threshold -= data_bitflips;
2765 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2766 if (ecc_bitflips < 0)
2767 return ecc_bitflips;
2769 bitflips_threshold -= ecc_bitflips;
2771 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2772 bitflips_threshold);
2773 if (extraoob_bitflips < 0)
2774 return extraoob_bitflips;
/* Chunk accepted as erased: scrub the bitflips back to all-0xff. */
2777 memset(data, 0xff, datalen);
2780 memset(ecc, 0xff, ecclen);
2782 if (extraoob_bitflips)
2783 memset(extraoob, 0xff, extraooblen);
2785 return data_bitflips + ecc_bitflips + extraoob_bitflips;
2787 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2790 * nand_read_page_raw_notsupp - dummy read raw page function
2791 * @chip: nand chip info structure
2792 * @buf: buffer to store read data
2793 * @oob_required: caller requires OOB data read to chip->oob_poi
2794 * @page: page number to read
2796 * Returns -ENOTSUPP unconditionally.
/* Stub installed when a controller cannot perform raw page reads. */
2798 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2799 int oob_required, int page)
2805 * nand_read_page_raw - [INTERN] read raw page data without ecc
2806 * @chip: nand chip info structure
2807 * @buf: buffer to store read data
2808 * @oob_required: caller requires OOB data read to chip->oob_poi
2809 * @page: page number to read
2811 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2813 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2816 struct mtd_info *mtd = nand_to_mtd(chip);
/* Read the main area first ... */
2819 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
/* ... then, if requested, continue reading the OOB area into oob_poi. */
2824 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2832 EXPORT_SYMBOL(nand_read_page_raw);
2835 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2836 * @chip: NAND chip info structure
2837 * @buf: buffer to store read data
2838 * @oob_required: caller requires OOB data read to chip->oob_poi
2839 * @page: page number to read
2841 * This is a raw page read, ie. without any error detection/correction.
2842 * Monolithic means we are requesting all the relevant data (main plus
2843 * eventually OOB) to be loaded in the NAND cache and sent over the
2844 * bus (from the NAND chip to the NAND controller) in a single
2845 * operation. This is an alternative to nand_read_page_raw(), which
2846 * first reads the main data, and if the OOB data is requested too,
2847 * then reads more data on the bus.
2849 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
2850 int oob_required, int page)
2852 struct mtd_info *mtd = nand_to_mtd(chip);
2853 unsigned int size = mtd->writesize;
/* Extend the single transfer to cover the OOB area when requested. */
2858 size += mtd->oobsize;
/*
 * A caller-provided buffer may be too small to hold main + OOB, so
 * read into the chip-internal buffer and copy the main data back.
 */
2860 if (buf != chip->data_buf)
2861 read_buf = nand_get_data_buf(chip);
2864 ret = nand_read_page_op(chip, page, 0, read_buf, size);
2868 if (buf != chip->data_buf)
2869 memcpy(buf, read_buf, mtd->writesize);
2873 EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2876 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2877 * @chip: nand chip info structure
2878 * @buf: buffer to store read data
2879 * @oob_required: caller requires OOB data read to chip->oob_poi
2880 * @page: page number to read
2882 * We need a special oob layout and handling even when OOB isn't used.
2884 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2885 int oob_required, int page)
2887 struct mtd_info *mtd = nand_to_mtd(chip);
2888 int eccsize = chip->ecc.size;
2889 int eccbytes = chip->ecc.bytes;
2890 uint8_t *oob = chip->oob_poi;
2891 int steps, size, ret;
2893 ret = nand_read_page_op(chip, page, 0, NULL, 0);
/*
 * Syndrome layout interleaves data and OOB on the flash:
 * [data | prepad | ecc | postpad] repeated per ECC step.
 */
2897 for (steps = chip->ecc.steps; steps > 0; steps--) {
2898 ret = nand_read_data_op(chip, buf, eccsize, false, false);
2904 if (chip->ecc.prepad) {
2905 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2910 oob += chip->ecc.prepad;
2913 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
2919 if (chip->ecc.postpad) {
2920 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2925 oob += chip->ecc.postpad;
/* Finally pull in whatever OOB bytes remain after the last step. */
2929 size = mtd->oobsize - (oob - chip->oob_poi);
2931 ret = nand_read_data_op(chip, oob, size, false, false);
2940 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2941 * @chip: nand chip info structure
2942 * @buf: buffer to store read data
2943 * @oob_required: caller requires OOB data read to chip->oob_poi
2944 * @page: page number to read
2946 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2947 int oob_required, int page)
2949 struct mtd_info *mtd = nand_to_mtd(chip);
2950 int i, eccsize = chip->ecc.size, ret;
2951 int eccbytes = chip->ecc.bytes;
2952 int eccsteps = chip->ecc.steps;
2954 uint8_t *ecc_calc = chip->ecc.calc_buf;
2955 uint8_t *ecc_code = chip->ecc.code_buf;
2956 unsigned int max_bitflips = 0;
/* Raw-read the whole page (OOB forced on: the stored ECC lives there). */
2958 chip->ecc.read_page_raw(chip, buf, 1, page);
/* Compute the expected ECC for every chunk of the freshly read data. */
2960 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2961 chip->ecc.calculate(chip, p, &ecc_calc[i]);
/* Extract the on-flash ECC bytes from the OOB buffer. */
2963 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2968 eccsteps = chip->ecc.steps;
/* Compare/correct chunk by chunk, accumulating per-page statistics. */
2971 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2974 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2976 mtd->ecc_stats.failed++;
2978 mtd->ecc_stats.corrected += stat;
2979 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2982 return max_bitflips;
2986 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2987 * @chip: nand chip info structure
2988 * @data_offs: offset of requested data within the page
2989 * @readlen: data length
2990 * @bufpoi: buffer to store read data
2991 * @page: page number to read
2993 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2994 uint32_t readlen, uint8_t *bufpoi, int page)
2996 struct mtd_info *mtd = nand_to_mtd(chip);
2997 int start_step, end_step, num_steps, ret;
2999 int data_col_addr, i, gaps = 0;
3000 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
3001 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
3002 int index, section = 0;
3003 unsigned int max_bitflips = 0;
3004 struct mtd_oob_region oobregion = { };
3006 /* Column address within the page aligned to ECC size (256bytes) */
3007 start_step = data_offs / chip->ecc.size;
3008 end_step = (data_offs + readlen - 1) / chip->ecc.size;
3009 num_steps = end_step - start_step + 1;
3010 index = start_step * chip->ecc.bytes;
3012 /* Data size aligned to ECC ecc.size */
3013 datafrag_len = num_steps * chip->ecc.size;
3014 eccfrag_len = num_steps * chip->ecc.bytes;
3016 data_col_addr = start_step * chip->ecc.size;
3017 /* If we read not a page aligned data */
3018 p = bufpoi + data_col_addr;
3019 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
/* Calculate ECC over the chunks we actually read. */
3024 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
3025 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
3028 * The performance is faster if we position offsets according to
3029 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
3031 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
3035 if (oobregion.length < eccfrag_len)
/* Gaps in the ECC layout: fall back to reading the whole OOB area. */
3039 ret = nand_change_read_column_op(chip, mtd->writesize,
3040 chip->oob_poi, mtd->oobsize,
3046 * Send the command to read the particular ECC bytes take care
3047 * about buswidth alignment in read_buf.
3049 aligned_pos = oobregion.offset & ~(busw - 1);
3050 aligned_len = eccfrag_len;
/* Widen the window by one bus word on either side if misaligned. */
3051 if (oobregion.offset & (busw - 1))
3053 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
3057 ret = nand_change_read_column_op(chip,
3058 mtd->writesize + aligned_pos,
3059 &chip->oob_poi[aligned_pos],
3060 aligned_len, false);
/* Pick the stored ECC bytes out of the OOB buffer. */
3065 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3066 chip->oob_poi, index, eccfrag_len);
3070 p = bufpoi + data_col_addr;
3071 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3074 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
3075 &chip->ecc.calc_buf[i]);
3076 if (stat == -EBADMSG &&
3077 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3078 /* check for empty pages with bitflips */
3079 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3080 &chip->ecc.code_buf[i],
3083 chip->ecc.strength);
3087 mtd->ecc_stats.failed++;
3089 mtd->ecc_stats.corrected += stat;
3090 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3093 return max_bitflips;
3097 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3098 * @chip: nand chip info structure
3099 * @buf: buffer to store read data
3100 * @oob_required: caller requires OOB data read to chip->oob_poi
3101 * @page: page number to read
3103 * Not for syndrome calculating ECC controllers which need a special oob layout.
3105 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
3106 int oob_required, int page)
3108 struct mtd_info *mtd = nand_to_mtd(chip);
3109 int i, eccsize = chip->ecc.size, ret;
3110 int eccbytes = chip->ecc.bytes;
3111 int eccsteps = chip->ecc.steps;
3113 uint8_t *ecc_calc = chip->ecc.calc_buf;
3114 uint8_t *ecc_code = chip->ecc.code_buf;
3115 unsigned int max_bitflips = 0;
3117 ret = nand_read_page_op(chip, page, 0, NULL, 0);
/* Per chunk: arm the HW ECC engine, read data, latch the computed ECC. */
3121 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3122 chip->ecc.hwctl(chip, NAND_ECC_READ);
3124 ret = nand_read_data_op(chip, p, eccsize, false, false);
3128 chip->ecc.calculate(chip, p, &ecc_calc[i]);
/* Read the whole OOB area holding the stored ECC bytes. */
3131 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
3136 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3141 eccsteps = chip->ecc.steps;
/* Correct each chunk; -EBADMSG may still be an erased page with flips. */
3144 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3147 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
3148 if (stat == -EBADMSG &&
3149 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3150 /* check for empty pages with bitflips */
3151 stat = nand_check_erased_ecc_chunk(p, eccsize,
3152 &ecc_code[i], eccbytes,
3154 chip->ecc.strength);
3158 mtd->ecc_stats.failed++;
3160 mtd->ecc_stats.corrected += stat;
3161 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3164 return max_bitflips;
3168 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
3169 * data read from OOB area
3170 * @chip: nand chip info structure
3171 * @buf: buffer to store read data
3172 * @oob_required: caller requires OOB data read to chip->oob_poi
3173 * @page: page number to read
3175 * Hardware ECC for large page chips, which requires the ECC data to be
3176 * extracted from the OOB before the actual data is read.
3178 int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
3179 int oob_required, int page)
3181 struct mtd_info *mtd = nand_to_mtd(chip);
3182 int i, eccsize = chip->ecc.size, ret;
3183 int eccbytes = chip->ecc.bytes;
3184 int eccsteps = chip->ecc.steps;
3186 uint8_t *ecc_code = chip->ecc.code_buf;
3187 unsigned int max_bitflips = 0;
3189 /* Read the OOB area first */
3190 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3194 /* Move read cursor to start of page */
3195 ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
3199 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
/* With stored ECC already in hand, read and correct chunk by chunk. */
3204 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3207 chip->ecc.hwctl(chip, NAND_ECC_READ);
3209 ret = nand_read_data_op(chip, p, eccsize, false, false);
/* calc_ecc is NULL: the HW engine corrects against the stored code. */
3213 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
3214 if (stat == -EBADMSG &&
3215 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3216 /* check for empty pages with bitflips */
3217 stat = nand_check_erased_ecc_chunk(p, eccsize,
3220 chip->ecc.strength);
3224 mtd->ecc_stats.failed++;
3226 mtd->ecc_stats.corrected += stat;
3227 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3230 return max_bitflips;
3232 EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
3235 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3236 * @chip: nand chip info structure
3237 * @buf: buffer to store read data
3238 * @oob_required: caller requires OOB data read to chip->oob_poi
3239 * @page: page number to read
3241 * The hw generator calculates the error syndrome automatically. Therefore we
3242 * need a special oob layout and handling.
3244 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
3245 int oob_required, int page)
3247 struct mtd_info *mtd = nand_to_mtd(chip);
3248 int ret, i, eccsize = chip->ecc.size;
3249 int eccbytes = chip->ecc.bytes;
3250 int eccsteps = chip->ecc.steps;
3251 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3253 uint8_t *oob = chip->oob_poi;
3254 unsigned int max_bitflips = 0;
3256 ret = nand_read_page_op(chip, page, 0, NULL, 0);
/* Interleaved layout: data, optional prepad, syndrome, optional postpad. */
3260 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3263 chip->ecc.hwctl(chip, NAND_ECC_READ);
3265 ret = nand_read_data_op(chip, p, eccsize, false, false);
3269 if (chip->ecc.prepad) {
3270 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3275 oob += chip->ecc.prepad;
/* Switch the engine to syndrome-verify mode before reading the ECC. */
3278 chip->ecc.hwctl(chip, NAND_ECC_READSYN);
3280 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
3284 stat = chip->ecc.correct(chip, p, oob, NULL);
3288 if (chip->ecc.postpad) {
3289 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3294 oob += chip->ecc.postpad;
3297 if (stat == -EBADMSG &&
3298 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3299 /* check for empty pages with bitflips */
3300 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3304 chip->ecc.strength);
3308 mtd->ecc_stats.failed++;
3310 mtd->ecc_stats.corrected += stat;
3311 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3315 /* Calculate remaining oob bytes */
3316 i = mtd->oobsize - (oob - chip->oob_poi);
3318 ret = nand_read_data_op(chip, oob, i, false, false);
3323 return max_bitflips;
3327 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3328 * @chip: NAND chip object
3329 * @oob: oob destination address
3330 * @ops: oob ops structure
3331 * @len: size of oob to transfer
3333 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3334 struct mtd_oob_ops *ops, size_t len)
3336 struct mtd_info *mtd = nand_to_mtd(chip);
3339 switch (ops->mode) {
/* PLACE_OOB: copy raw OOB bytes at the caller-specified offset. */
3341 case MTD_OPS_PLACE_OOB:
3343 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
/* AUTO_OOB: extract only the free (non-ECC) bytes per the OOB layout. */
3346 case MTD_OPS_AUTO_OOB:
3347 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3359 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3360 * @chip: NAND chip object
3361 * @retry_mode: the retry mode to use
3363 * Some vendors supply a special command to shift the Vt threshold, to be used
3364 * when there are too many bitflips in a page (i.e., ECC error). After setting
3365 * a new threshold, the host should retry reading the page.
3367 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3369 pr_debug("setting READ RETRY mode %d\n", retry_mode);
/* Mode must stay below the number of retry modes the chip advertises. */
3371 if (retry_mode >= chip->read_retries)
3374 if (!chip->ops.setup_read_retry)
/* Vendor-specific hook actually programs the new Vt threshold mode. */
3377 return chip->ops.setup_read_retry(chip, retry_mode);
/*
 * nand_wait_readrdy - wait for the chip to become ready after a read,
 * but only on chips that flagged NAND_NEED_READRDY. Uses the tR_max
 * timing from the current interface configuration as the timeout.
 */
3380 static void nand_wait_readrdy(struct nand_chip *chip)
3382 const struct nand_interface_config *conf;
3384 if (!(chip->options & NAND_NEED_READRDY))
3387 conf = nand_get_interface_config(chip);
3388 WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
3392 * nand_do_read_ops - [INTERN] Read data with ECC
3393 * @chip: NAND chip object
3394 * @from: offset to read from
3395 * @ops: oob ops structure
3397 * Internal function. Called with chip held.
3399 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3400 struct mtd_oob_ops *ops)
3402 int chipnr, page, realpage, col, bytes, aligned, oob_required;
3403 struct mtd_info *mtd = nand_to_mtd(chip);
3405 uint32_t readlen = ops->len;
3406 uint32_t oobreadlen = ops->ooblen;
3407 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3409 uint8_t *bufpoi, *oob, *buf;
3411 unsigned int max_bitflips = 0;
3413 bool ecc_fail = false;
3415 /* Check if the region is secured */
3416 if (nand_region_is_secured(chip, from, readlen))
/* Select the die containing the first page of the request. */
3419 chipnr = (int)(from >> chip->chip_shift);
3420 nand_select_target(chip, chipnr);
3422 realpage = (int)(from >> chip->page_shift);
3423 page = realpage & chip->pagemask;
/* Column offset inside the first page (requests need not be aligned). */
3425 col = (int)(from & (mtd->writesize - 1));
3429 oob_required = oob ? 1 : 0;
3432 struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
3434 bytes = min(mtd->writesize - col, readlen);
3435 aligned = (bytes == mtd->writesize);
/* DMA-capable controllers need a virt-addressable, aligned buffer. */
3439 else if (chip->options & NAND_USES_DMA)
3440 use_bounce_buf = !virt_addr_valid(buf) ||
3441 !IS_ALIGNED((unsigned long)buf,
3446 /* Is the current page in the buffer? */
3447 if (realpage != chip->pagecache.page || oob) {
3448 bufpoi = use_bounce_buf ? chip->data_buf : buf;
3450 if (use_bounce_buf && aligned)
3451 pr_debug("%s: using read bounce buffer for buf@%p\n",
3456 * Now read the page into the buffer. Absent an error,
3457 * the read methods return max bitflips per ecc step.
3459 if (unlikely(ops->mode == MTD_OPS_RAW))
3460 ret = chip->ecc.read_page_raw(chip, bufpoi,
3463 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3465 ret = chip->ecc.read_subpage(chip, col, bytes,
3468 ret = chip->ecc.read_page(chip, bufpoi,
3469 oob_required, page);
3472 /* Invalidate page cache */
3473 chip->pagecache.page = -1;
3478 * Copy back the data in the initial buffer when reading
3479 * partial pages or when a bounce buffer is required.
3481 if (use_bounce_buf) {
3482 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3483 !(mtd->ecc_stats.failed - ecc_stats.failed) &&
3484 (ops->mode != MTD_OPS_RAW)) {
/* Full, clean, ECC-corrected page: keep it in the page cache. */
3485 chip->pagecache.page = realpage;
3486 chip->pagecache.bitflips = ret;
3488 /* Invalidate page cache */
3489 chip->pagecache.page = -1;
3491 memcpy(buf, bufpoi + col, bytes);
3494 if (unlikely(oob)) {
3495 int toread = min(oobreadlen, max_oobsize);
3498 oob = nand_transfer_oob(chip, oob, ops,
3500 oobreadlen -= toread;
3504 nand_wait_readrdy(chip);
/* New ECC failures on this page: try the next read-retry mode. */
3506 if (mtd->ecc_stats.failed - ecc_stats.failed) {
3507 if (retry_mode + 1 < chip->read_retries) {
3509 ret = nand_setup_read_retry(chip,
3514 /* Reset ecc_stats; retry */
3515 mtd->ecc_stats = ecc_stats;
3518 /* No more retry modes; real failure */
3524 max_bitflips = max_t(unsigned int, max_bitflips, ret);
/* Page was already cached: serve the request from the cache buffer. */
3526 memcpy(buf, chip->data_buf + col, bytes);
3528 max_bitflips = max_t(unsigned int, max_bitflips,
3529 chip->pagecache.bitflips);
3534 /* Reset to retry mode 0 */
3536 ret = nand_setup_read_retry(chip, 0);
3545 /* For subsequent reads align to page boundary */
3547 /* Increment page address */
3550 page = realpage & chip->pagemask;
3551 /* Check, if we cross a chip boundary */
3554 nand_deselect_target(chip);
3555 nand_select_target(chip, chipnr);
3558 nand_deselect_target(chip);
3560 ops->retlen = ops->len - (size_t) readlen;
3562 ops->oobretlen = ops->ooblen - oobreadlen;
3570 return max_bitflips;
3574 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3575 * @chip: nand chip info structure
3576 * @page: page number to read
3578 int nand_read_oob_std(struct nand_chip *chip, int page)
3580 struct mtd_info *mtd = nand_to_mtd(chip);
/* Plain OOB read of the whole spare area into chip->oob_poi. */
3582 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3584 EXPORT_SYMBOL(nand_read_oob_std);
3587 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3589 * @chip: nand chip info structure
3590 * @page: page number to read
3592 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3594 struct mtd_info *mtd = nand_to_mtd(chip);
3595 int length = mtd->oobsize;
3596 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3597 int eccsize = chip->ecc.size;
3598 uint8_t *bufpoi = chip->oob_poi;
3599 int i, toread, sndrnd = 0, pos, ret;
3601 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
/* OOB chunks are interleaved with the data: hop to each in turn. */
3605 for (i = 0; i < chip->ecc.steps; i++) {
3609 pos = eccsize + i * (eccsize + chunk);
/* Large-page chips support column changes; small-page must re-issue READ. */
3610 if (mtd->writesize > 512)
3611 ret = nand_change_read_column_op(chip, pos,
3615 ret = nand_read_page_op(chip, page, pos, NULL,
3622 toread = min_t(int, length, chunk);
3624 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
/* Tail: any remaining OOB bytes after the last interleaved chunk. */
3632 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3641 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3642 * @chip: nand chip info structure
3643 * @page: page number to write
3645 int nand_write_oob_std(struct nand_chip *chip, int page)
3647 struct mtd_info *mtd = nand_to_mtd(chip);
/* Program only the spare area: start the page op at column writesize. */
3649 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3652 EXPORT_SYMBOL(nand_write_oob_std);
3655 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3656 * with syndrome - only for large page flash
3657 * @chip: nand chip info structure
3658 * @page: page number to write
3660 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3662 struct mtd_info *mtd = nand_to_mtd(chip);
3663 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3664 int eccsize = chip->ecc.size, length = mtd->oobsize;
3665 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3666 const uint8_t *bufpoi = chip->oob_poi;
3669 * data-ecc-data-ecc ... ecc-oob
3671 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3673 if (!chip->ecc.prepad && !chip->ecc.postpad) {
/* No padding: the OOB region is contiguous after all data+ecc pairs. */
3674 pos = steps * (eccsize + chunk);
3679 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3683 for (i = 0; i < steps; i++) {
/* Small-page chips cannot change column: pad with 0xFF fill words. */
3685 if (mtd->writesize <= 512) {
3686 uint32_t fill = 0xFFFFFFFF;
3690 int num = min_t(int, len, 4);
3692 ret = nand_write_data_op(chip, &fill,
3700 pos = eccsize + i * (eccsize + chunk);
3701 ret = nand_change_write_column_op(chip, pos,
3709 len = min_t(int, length, chunk);
3711 ret = nand_write_data_op(chip, bufpoi, len, false);
/* Tail: any remaining OOB bytes after the last interleaved chunk. */
3719 ret = nand_write_data_op(chip, bufpoi, length, false);
3724 return nand_prog_page_end_op(chip);
3728 * nand_do_read_oob - [INTERN] NAND read out-of-band
3729 * @chip: NAND chip object
3730 * @from: offset to read from
3731 * @ops: oob operations description structure
3733 * NAND read out-of-band data from the spare area.
3735 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3736 struct mtd_oob_ops *ops)
3738 struct mtd_info *mtd = nand_to_mtd(chip);
3739 unsigned int max_bitflips = 0;
3740 int page, realpage, chipnr;
3741 struct mtd_ecc_stats stats;
3742 int readlen = ops->ooblen;
3744 uint8_t *buf = ops->oobbuf;
3747 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3748 __func__, (unsigned long long)from, readlen);
3750 /* Check if the region is secured */
3751 if (nand_region_is_secured(chip, from, readlen))
/* Snapshot ECC stats so new failures can be detected at the end. */
3754 stats = mtd->ecc_stats;
3756 len = mtd_oobavail(mtd, ops);
3758 chipnr = (int)(from >> chip->chip_shift);
3759 nand_select_target(chip, chipnr);
3761 /* Shift to get page */
3762 realpage = (int)(from >> chip->page_shift);
3763 page = realpage & chip->pagemask;
/* RAW mode bypasses ECC handling; otherwise use the ECC-aware OOB read. */
3766 if (ops->mode == MTD_OPS_RAW)
3767 ret = chip->ecc.read_oob_raw(chip, page)
3769 ret = chip->ecc.read_oob(chip, page);
3774 len = min(len, readlen);
3775 buf = nand_transfer_oob(chip, buf, ops, len);
3777 nand_wait_readrdy(chip);
3779 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3785 /* Increment page address */
3788 page = realpage & chip->pagemask;
3789 /* Check, if we cross a chip boundary */
3792 nand_deselect_target(chip);
3793 nand_select_target(chip, chipnr);
3796 nand_deselect_target(chip);
3798 ops->oobretlen = ops->ooblen - readlen;
/* Report -EBADMSG to the caller if any new uncorrectable errors appeared. */
3803 if (mtd->ecc_stats.failed - stats.failed)
3806 return max_bitflips;
3810 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3811 * @mtd: MTD device structure
3812 * @from: offset to read from
3813 * @ops: oob operation description structure
3815 * NAND read data and/or out-of-band data.
3817 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3818 struct mtd_oob_ops *ops)
3820 struct nand_chip *chip = mtd_to_nand(mtd);
/* Only the three supported OOB modes are accepted. */
3825 if (ops->mode != MTD_OPS_PLACE_OOB &&
3826 ops->mode != MTD_OPS_AUTO_OOB &&
3827 ops->mode != MTD_OPS_RAW)
/* Serialize against other users of the chip for the whole operation. */
3830 nand_get_device(chip);
3833 ret = nand_do_read_oob(chip, from, ops);
3835 ret = nand_do_read_ops(chip, from, ops);
3837 nand_release_device(chip);
3842 * nand_write_page_raw_notsupp - dummy raw page write function
3843 * @chip: nand chip info structure
3845 * @oob_required: must write chip->oob_poi to OOB
3846 * @page: page number to write
3848 * Returns -ENOTSUPP unconditionally.
/* Stub installed when a controller cannot perform raw page writes. */
3850 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3851 int oob_required, int page)
3857 * nand_write_page_raw - [INTERN] raw page write function
3858 * @chip: nand chip info structure
3860 * @oob_required: must write chip->oob_poi to OOB
3861 * @page: page number to write
3863 * Not for syndrome calculating ECC controllers, which use a special oob layout.
3865 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3866 int oob_required, int page)
3868 struct mtd_info *mtd = nand_to_mtd(chip);
/* Latch the main data first ... */
3871 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
/* ... then, if requested, latch the OOB bytes before programming. */
3876 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3882 return nand_prog_page_end_op(chip);
3884 EXPORT_SYMBOL(nand_write_page_raw);
3887 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3888 * @chip: NAND chip info structure
3889 * @buf: data buffer to write
3890 * @oob_required: must write chip->oob_poi to OOB
3891 * @page: page number to write
3893 * This is a raw page write, ie. without any error detection/correction.
3894 * Monolithic means we are requesting all the relevant data (main plus
3895 * eventually OOB) to be sent over the bus and effectively programmed
3896 * into the NAND chip arrays in a single operation. This is an
3897 * alternative to nand_write_page_raw(), which first sends the main
3898 * data, then eventually send the OOB data by latching more data
3899 * cycles on the NAND bus, and finally sends the program command to
3900 * synchronyze the NAND chip cache.
3902 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3903 int oob_required, int page)
3905 struct mtd_info *mtd = nand_to_mtd(chip);
3906 unsigned int size = mtd->writesize;
3907 u8 *write_buf = (u8 *)buf;
/* Extend the single transfer to cover the OOB area when requested. */
3910 size += mtd->oobsize;
/*
 * When the caller's buffer is not the chip buffer, stage main data in
 * the chip buffer so main + OOB can be sent as one contiguous write.
 */
3912 if (buf != chip->data_buf) {
3913 write_buf = nand_get_data_buf(chip);
3914 memcpy(write_buf, buf, mtd->writesize);
3918 return nand_prog_page_op(chip, page, 0, write_buf, size);
3920 EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3923 * nand_write_page_raw_syndrome - [INTERN] raw page write function
3924 * @chip: nand chip info structure
/* NOTE(review): the @buf parameter line (original 3925) is missing from this extract. */
3926 * @oob_required: must write chip->oob_poi to OOB
3927 * @page: page number to write
3929 * We need a special oob layout and handling even when ECC isn't checked.
3931 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3932 const uint8_t *buf, int oob_required,
3935 struct mtd_info *mtd = nand_to_mtd(chip);
3936 int eccsize = chip->ecc.size;
3937 int eccbytes = chip->ecc.bytes;
3938 uint8_t *oob = chip->oob_poi;
3939 int steps, size, ret;
3941 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Interleave data/prepad/ECC/postpad chunks per ECC step, matching the
 * syndrome controller's on-flash layout. */
3945 for (steps = chip->ecc.steps; steps > 0; steps--) {
3946 ret = nand_write_data_op(chip, buf, eccsize, false);
3952 if (chip->ecc.prepad) {
3953 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3958 oob += chip->ecc.prepad;
3961 ret = nand_write_data_op(chip, oob, eccbytes, false);
3967 if (chip->ecc.postpad) {
3968 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3973 oob += chip->ecc.postpad;
/* Flush whatever OOB bytes remain after the per-step layout. */
3977 size = mtd->oobsize - (oob - chip->oob_poi);
3979 ret = nand_write_data_op(chip, oob, size, false);
3984 return nand_prog_page_end_op(chip);
3987 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3988 * @chip: nand chip info structure
/* NOTE(review): the @buf parameter line (original 3989) is missing from this extract. */
3990 * @oob_required: must write chip->oob_poi to OOB
3991 * @page: page number to write
3993 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3994 int oob_required, int page)
3996 struct mtd_info *mtd = nand_to_mtd(chip);
3997 int i, eccsize = chip->ecc.size, ret;
3998 int eccbytes = chip->ecc.bytes;
3999 int eccsteps = chip->ecc.steps;
4000 uint8_t *ecc_calc = chip->ecc.calc_buf;
4001 const uint8_t *p = buf;
4003 /* Software ECC calculation */
4004 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
4005 chip->ecc.calculate(chip, p, &ecc_calc[i]);
/* Place the computed ECC bytes into oob_poi per the OOB layout... */
4007 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
/* ...then delegate the actual programming to the raw page writer
 * (oob_required forced to 1 so the ECC bytes reach the flash). */
4012 return chip->ecc.write_page_raw(chip, buf, 1, page);
4016 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4017 * @chip: nand chip info structure
/* NOTE(review): the @buf parameter line (original 4018) is missing from this extract. */
4019 * @oob_required: must write chip->oob_poi to OOB
4020 * @page: page number to write
4022 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
4023 int oob_required, int page)
4025 struct mtd_info *mtd = nand_to_mtd(chip);
4026 int i, eccsize = chip->ecc.size, ret;
4027 int eccbytes = chip->ecc.bytes;
4028 int eccsteps = chip->ecc.steps;
4029 uint8_t *ecc_calc = chip->ecc.calc_buf;
4030 const uint8_t *p = buf;
4032 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Per ECC step: arm the hardware engine, stream the data chunk, then read
 * back the engine-computed ECC bytes into ecc_calc. */
4036 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4037 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4039 ret = nand_write_data_op(chip, p, eccsize, false);
4043 chip->ecc.calculate(chip, p, &ecc_calc[i]);
/* Scatter the ECC bytes into oob_poi per layout, then send the whole OOB. */
4046 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4051 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4055 return nand_prog_page_end_op(chip);
4060 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4061 * @chip: nand chip info structure
4062 * @offset: column address of subpage within the page
4063 * @data_len: data length
/* NOTE(review): the @buf parameter line (original 4064) is missing from this extract. */
4065 * @oob_required: must write chip->oob_poi to OOB
4066 * @page: page number to write
4068 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
4069 uint32_t data_len, const uint8_t *buf,
4070 int oob_required, int page)
4072 struct mtd_info *mtd = nand_to_mtd(chip);
4073 uint8_t *oob_buf = chip->oob_poi;
4074 uint8_t *ecc_calc = chip->ecc.calc_buf;
4075 int ecc_size = chip->ecc.size;
4076 int ecc_bytes = chip->ecc.bytes;
4077 int ecc_steps = chip->ecc.steps;
/* First and last ECC step touched by [offset, offset + data_len). */
4078 uint32_t start_step = offset / ecc_size;
4079 uint32_t end_step = (offset + data_len - 1) / ecc_size;
4080 int oob_bytes = mtd->oobsize / ecc_steps;
4083 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4087 for (step = 0; step < ecc_steps; step++) {
4088 /* configure controller for WRITE access */
4089 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4091 /* write data (untouched subpages already masked by 0xFF) */
4092 ret = nand_write_data_op(chip, buf, ecc_size, false);
4096 /* mask ECC of un-touched subpages by padding 0xFF */
4097 if ((step < start_step) || (step > end_step))
4098 memset(ecc_calc, 0xff, ecc_bytes);
/* NOTE(review): an else branch is presumably elided here — as shown, the
 * calculate below runs for every step. Confirm against the full source. */
4100 chip->ecc.calculate(chip, buf, ecc_calc);
4102 /* mask OOB of un-touched subpages by padding 0xFF */
4103 /* if oob_required, preserve OOB metadata of written subpage */
4104 if (!oob_required || (step < start_step) || (step > end_step))
4105 memset(oob_buf, 0xff, oob_bytes);
4108 ecc_calc += ecc_bytes;
4109 oob_buf += oob_bytes;
4112 /* copy calculated ECC for whole page to chip->buffer->oob */
4113 /* this include masked-value(0xFF) for unwritten subpages */
4114 ecc_calc = chip->ecc.calc_buf;
4115 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4120 /* write OOB buffer to NAND device */
4121 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4125 return nand_prog_page_end_op(chip);
4130 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
4131 * @chip: nand chip info structure
/* NOTE(review): the @buf parameter line (original 4132) is missing from this extract. */
4133 * @oob_required: must write chip->oob_poi to OOB
4134 * @page: page number to write
4136 * The hw generator calculates the error syndrome automatically. Therefore we
4137 * need a special oob layout and handling.
4139 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
4140 int oob_required, int page)
4142 struct mtd_info *mtd = nand_to_mtd(chip);
4143 int i, eccsize = chip->ecc.size;
4144 int eccbytes = chip->ecc.bytes;
4145 int eccsteps = chip->ecc.steps;
4146 const uint8_t *p = buf;
4147 uint8_t *oob = chip->oob_poi;
4150 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Per step: arm the engine, send data, then send prepad / engine ECC /
 * postpad so the on-flash layout matches the syndrome controller. */
4154 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4155 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4157 ret = nand_write_data_op(chip, p, eccsize, false);
4161 if (chip->ecc.prepad) {
4162 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4167 oob += chip->ecc.prepad;
4170 chip->ecc.calculate(chip, p, oob);
4172 ret = nand_write_data_op(chip, oob, eccbytes, false);
/* NOTE(review): an `oob += eccbytes;` advance is presumably elided here —
 * confirm against the full source. */
4178 if (chip->ecc.postpad) {
4179 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4184 oob += chip->ecc.postpad;
4188 /* Calculate remaining oob bytes */
4189 i = mtd->oobsize - (oob - chip->oob_poi);
4191 ret = nand_write_data_op(chip, oob, i, false);
4196 return nand_prog_page_end_op(chip);
4200 * nand_write_page - write one page
4201 * @chip: NAND chip descriptor
4202 * @offset: address offset within the page
4203 * @data_len: length of actual data to be written
4204 * @buf: the data to write
4205 * @oob_required: must write chip->oob_poi to OOB
4206 * @page: page number to write
4207 * @raw: use _raw version of write_page
4209 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
4210 int data_len, const uint8_t *buf, int oob_required,
4213 struct mtd_info *mtd = nand_to_mtd(chip);
4214 int status, subpage;
/* A partial-page write goes through the subpage path only when the chip
 * allows subpage writes and a write_subpage hook exists. */
4216 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4217 chip->ecc.write_subpage)
4218 subpage = offset || (data_len < mtd->writesize);
/* Dispatch: raw writer, subpage writer, or full-page ECC writer.
 * (The if/else scaffolding around these calls is elided in this extract.) */
4223 status = chip->ecc.write_page_raw(chip, buf, oob_required,
4226 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
4227 oob_required, page);
4229 status = chip->ecc.write_page(chip, buf, oob_required, page);
/* True when x is not aligned to the chip's subpage size (subpagesize is a
 * power of two, so the mask test is exact). */
4237 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
4240 * nand_do_write_ops - [INTERN] NAND write with ECC
4241 * @chip: NAND chip object
4242 * @to: offset to write to
4243 * @ops: oob operations description structure
4245 * NAND write with ECC.
4247 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
4248 struct mtd_oob_ops *ops)
4250 struct mtd_info *mtd = nand_to_mtd(chip);
4251 int chipnr, realpage, page, column;
4252 uint32_t writelen = ops->len;
4254 uint32_t oobwritelen = ops->ooblen;
4255 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
4257 uint8_t *oob = ops->oobbuf;
4258 uint8_t *buf = ops->datbuf;
4260 int oob_required = oob ? 1 : 0;
4266 /* Reject writes, which are not page aligned */
4267 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
4268 pr_notice("%s: attempt to write non page aligned data\n",
4273 /* Check if the region is secured */
4274 if (nand_region_is_secured(chip, to, writelen))
/* Derive the in-page column and the target die, then select that die. */
4277 column = to & (mtd->writesize - 1);
4279 chipnr = (int)(to >> chip->chip_shift);
4280 nand_select_target(chip, chipnr);
4282 /* Check, if it is write protected */
4283 if (nand_check_wp(chip)) {
4288 realpage = (int)(to >> chip->page_shift);
4289 page = realpage & chip->pagemask;
4291 /* Invalidate the page cache, when we write to the cached page */
4292 if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
4293 ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
4294 chip->pagecache.page = -1;
4296 /* Don't allow multipage oob writes with offset */
4297 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
/* Main per-page write loop (loop header elided in this extract). */
4303 int bytes = mtd->writesize;
4304 uint8_t *wbuf = buf;
4306 int part_pagewr = (column || writelen < mtd->writesize);
/* DMA-capable controllers need a bounce buffer for unmapped or
 * misaligned user buffers. */
4310 else if (chip->options & NAND_USES_DMA)
4311 use_bounce_buf = !virt_addr_valid(buf) ||
4312 !IS_ALIGNED((unsigned long)buf,
4318 * Copy the data from the initial buffer when doing partial page
4319 * writes or when a bounce buffer is required.
4321 if (use_bounce_buf) {
4322 pr_debug("%s: using write bounce buffer for buf@%p\n",
4325 bytes = min_t(int, bytes - column, writelen);
4326 wbuf = nand_get_data_buf(chip);
/* Pad the untouched part of the page with 0xff (NAND erased state). */
4327 memset(wbuf, 0xff, mtd->writesize);
4328 memcpy(&wbuf[column], buf, bytes);
4331 if (unlikely(oob)) {
4332 size_t len = min(oobwritelen, oobmaxlen);
4333 oob = nand_fill_oob(chip, oob, len, ops);
4336 /* We still need to erase leftover OOB data */
4337 memset(chip->oob_poi, 0xff, mtd->oobsize);
4340 ret = nand_write_page(chip, column, bytes, wbuf,
4342 (ops->mode == MTD_OPS_RAW));
4354 page = realpage & chip->pagemask;
4355 /* Check, if we cross a chip boundary */
4358 nand_deselect_target(chip);
4359 nand_select_target(chip, chipnr);
/* Report how much was actually written back to the caller. */
4363 ops->retlen = ops->len - writelen;
4365 ops->oobretlen = ops->ooblen;
4368 nand_deselect_target(chip);
4373 * panic_nand_write - [MTD Interface] NAND write with ECC
4374 * @mtd: MTD device structure
4375 * @to: offset to write to
4376 * @len: number of bytes to write
4377 * @retlen: pointer to variable to store the number of written bytes
4378 * @buf: the data to write
4380 * NAND write with ECC. Used when performing writes in interrupt context, this
4381 * may for example be called by mtdoops when writing an oops while in panic.
4383 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4384 size_t *retlen, const uint8_t *buf)
4386 struct nand_chip *chip = mtd_to_nand(mtd);
4387 int chipnr = (int)(to >> chip->chip_shift);
4388 struct mtd_oob_ops ops;
/* No nand_get_device() here: in panic context we cannot sleep on the lock,
 * so the target is selected directly and we busy-wait for readiness. */
4391 nand_select_target(chip, chipnr);
4393 /* Wait for the device to get ready */
4394 panic_nand_wait(chip, 400);
4396 memset(&ops, 0, sizeof(ops));
4398 ops.datbuf = (uint8_t *)buf;
4399 ops.mode = MTD_OPS_PLACE_OOB;
4401 ret = nand_do_write_ops(chip, to, &ops);
4403 *retlen = ops.retlen;
4408 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4409 * @mtd: MTD device structure
4410 * @to: offset to write to
4411 * @ops: oob operation description structure
4413 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4414 struct mtd_oob_ops *ops)
4416 struct nand_chip *chip = mtd_to_nand(mtd);
/* Serialize against other users of the chip for the whole operation. */
4421 nand_get_device(chip);
4423 switch (ops->mode) {
4424 case MTD_OPS_PLACE_OOB:
4425 case MTD_OPS_AUTO_OOB:
/* OOB-only requests take the dedicated path; otherwise write data (+OOB). */
4434 ret = nand_do_write_oob(chip, to, ops);
4436 ret = nand_do_write_ops(chip, to, ops);
4439 nand_release_device(chip);
4444 * nand_erase - [MTD Interface] erase block(s)
4445 * @mtd: MTD device structure
4446 * @instr: erase instruction
4448 * Erase one or more blocks.
/* Thin MTD-facing wrapper: delegates with allowbbt = 0 (BBT area protected). */
4450 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4452 return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4456 * nand_erase_nand - [INTERN] erase block(s)
4457 * @chip: NAND chip object
4458 * @instr: erase instruction
4459 * @allowbbt: allow erasing the bbt area
4461 * Erase one or more blocks.
4463 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4466 int page, pages_per_block, ret, chipnr;
4469 pr_debug("%s: start = 0x%012llx, len = %llu\n",
4470 __func__, (unsigned long long)instr->addr,
4471 (unsigned long long)instr->len);
4473 if (check_offs_len(chip, instr->addr, instr->len))
4476 /* Check if the region is secured */
4477 if (nand_region_is_secured(chip, instr->addr, instr->len))
4480 /* Grab the lock and see if the device is available */
4481 nand_get_device(chip);
4483 /* Shift to get first page */
4484 page = (int)(instr->addr >> chip->page_shift);
4485 chipnr = (int)(instr->addr >> chip->chip_shift);
4487 /* Calculate pages in each block */
4488 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4490 /* Select the NAND device */
4491 nand_select_target(chip, chipnr);
4493 /* Check, if it is write protected */
4494 if (nand_check_wp(chip)) {
4495 pr_debug("%s: device is write protected!\n",
4501 /* Loop through the pages */
4505 /* Check if we have a bad block, we do not erase bad blocks! */
4506 if (nand_block_checkbad(chip, ((loff_t) page) <<
4507 chip->page_shift, allowbbt)) {
4508 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4515 * Invalidate the page cache, if we erase the block which
4516 * contains the current cached page.
4518 if (page <= chip->pagecache.page && chip->pagecache.page <
4519 (page + pages_per_block))
4520 chip->pagecache.page = -1;
/* Erase by block index: page number converted to eraseblock number. */
4522 ret = nand_erase_op(chip, (page & chip->pagemask) >>
4523 (chip->phys_erase_shift - chip->page_shift));
4525 pr_debug("%s: failed erase, page 0x%08x\n",
4528 ((loff_t)page << chip->page_shift);
4532 /* Increment page address and decrement length */
4533 len -= (1ULL << chip->phys_erase_shift);
4534 page += pages_per_block;
4536 /* Check, if we cross a chip boundary */
4537 if (len && !(page & chip->pagemask)) {
4539 nand_deselect_target(chip);
4540 nand_select_target(chip, chipnr);
4547 /* Deselect and wake up anyone waiting on the device */
4548 nand_deselect_target(chip);
4549 nand_release_device(chip);
4551 /* Return more or less happy */
4556 * nand_sync - [MTD Interface] sync
4557 * @mtd: MTD device structure
4559 * Sync is actually a wait for chip ready function.
4561 static void nand_sync(struct mtd_info *mtd)
4563 struct nand_chip *chip = mtd_to_nand(mtd);
4565 pr_debug("%s: called\n", __func__);
4567 /* Grab the lock and see if the device is available */
4568 nand_get_device(chip);
4569 /* Release it and go back */
4570 nand_release_device(chip);
4574 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4575 * @mtd: MTD device structure
4576 * @offs: offset relative to mtd start
4578 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4580 struct nand_chip *chip = mtd_to_nand(mtd);
4581 int chipnr = (int)(offs >> chip->chip_shift);
4584 /* Select the NAND device */
4585 nand_get_device(chip);
4587 nand_select_target(chip, chipnr);
/* allowbbt = 0: the BBT region itself is reported through normal rules. */
4589 ret = nand_block_checkbad(chip, offs, 0);
4591 nand_deselect_target(chip);
4592 nand_release_device(chip);
4598 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4599 * @mtd: MTD device structure
4600 * @ofs: offset relative to mtd start
4602 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* First query the current state so already-bad blocks are a no-op. */
4606 ret = nand_block_isbad(mtd, ofs);
4608 /* If it was bad already, return success and do nothing */
4614 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4618 * nand_suspend - [MTD Interface] Suspend the NAND flash
4619 * @mtd: MTD device structure
4621 * Returns 0 for success or negative error code otherwise.
4623 static int nand_suspend(struct mtd_info *mtd)
4625 struct nand_chip *chip = mtd_to_nand(mtd);
/* chip->lock guards the suspended flag against concurrent resume/ops. */
4628 mutex_lock(&chip->lock);
4629 if (chip->ops.suspend)
4630 ret = chip->ops.suspend(chip);
/* Presumably only set on suspend success (guard elided) — TODO confirm. */
4632 chip->suspended = 1;
4633 mutex_unlock(&chip->lock);
4639 * nand_resume - [MTD Interface] Resume the NAND flash
4640 * @mtd: MTD device structure
4642 static void nand_resume(struct mtd_info *mtd)
4644 struct nand_chip *chip = mtd_to_nand(mtd);
4646 mutex_lock(&chip->lock);
4647 if (chip->suspended) {
4648 if (chip->ops.resume)
4649 chip->ops.resume(chip);
4650 chip->suspended = 0;
/* Resuming a chip that was never suspended is a caller bug — log it. */
4652 pr_err("%s called for a chip which is not in suspended state\n",
4655 mutex_unlock(&chip->lock);
/* Wake any operation that blocked while the chip was suspended. */
4657 wake_up_all(&chip->resume_wq);
4661 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4662 * prevent further operations
4663 * @mtd: MTD device structure
/* Body elided in this extract; presumably delegates to the suspend path. */
4665 static void nand_shutdown(struct mtd_info *mtd)
4671 * nand_lock - [MTD Interface] Lock the NAND flash
4672 * @mtd: MTD device structure
4673 * @ofs: offset byte address
4674 * @len: number of bytes to lock (must be a multiple of block/page size)
4676 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4678 struct nand_chip *chip = mtd_to_nand(mtd);
/* Locking is optional per chip; without a lock_area hook the op fails. */
4680 if (!chip->ops.lock_area)
4683 return chip->ops.lock_area(chip, ofs, len);
4687 * nand_unlock - [MTD Interface] Unlock the NAND flash
4688 * @mtd: MTD device structure
4689 * @ofs: offset byte address
4690 * @len: number of bytes to unlock (must be a multiple of block/page size)
4692 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4694 struct nand_chip *chip = mtd_to_nand(mtd);
/* Mirror of nand_lock(): optional per-chip unlock_area hook. */
4696 if (!chip->ops.unlock_area)
4699 return chip->ops.unlock_area(chip, ofs, len);
4702 /* Set default functions */
/* Fill in fallback controller / legacy hooks and sane buffer alignment for
 * drivers that did not provide their own. */
4703 static void nand_set_defaults(struct nand_chip *chip)
4705 /* If no controller is provided, use the dummy, legacy one. */
4706 if (!chip->controller) {
4707 chip->controller = &chip->legacy.dummy_controller;
4708 nand_controller_init(chip->controller);
4711 nand_legacy_set_defaults(chip);
4713 if (!chip->buf_align)
4714 chip->buf_align = 1;
4717 /* Sanitize ONFI strings so we can safely print them */
4718 void sanitize_string(uint8_t *s, size_t len)
4722 /* Null terminate */
4725 /* Remove non printable chars */
4726 for (i = 0; i < len - 1; i++) {
/* Anything outside printable ASCII gets replaced (replacement elided). */
4727 if (s[i] < ' ' || s[i] > 127)
4731 /* Remove trailing spaces */
4736 * nand_id_has_period - Check if an ID string has a given wraparound period
4737 * @id_data: the ID string
4738 * @arrlen: the length of the @id_data array
4739 * @period: the period of repetition
4741 * Check if an ID string is repeated within a given sequence of bytes at
4742 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4743 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4744 * if the repetition has a period of @period; otherwise, returns zero.
4746 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
/* Compare every byte against its counterpart one period earlier; any
 * mismatch disproves the candidate period. */
4749 for (i = 0; i < period; i++)
4750 for (j = i + period; j < arrlen; j += period)
4751 if (id_data[i] != id_data[j])
4757 * nand_id_len - Get the length of an ID string returned by CMD_READID
4758 * @id_data: the ID string
4759 * @arrlen: the length of the @id_data array
4761 * Returns the length of the ID string, according to known wraparound/trailing
4762 * zero patterns. If no pattern exists, returns the length of the array.
4764 static int nand_id_len(u8 *id_data, int arrlen)
4766 int last_nonzero, period;
4768 /* Find last non-zero byte */
4769 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4770 if (id_data[last_nonzero])
/* All zero: no usable ID at all. */
4774 if (last_nonzero < 0)
4777 /* Calculate wraparound period */
4778 for (period = 1; period < arrlen; period++)
4779 if (nand_id_has_period(id_data, arrlen, period))
4782 /* There's a repeated pattern */
4783 if (period < arrlen)
4786 /* There are trailing zeros */
4787 if (last_nonzero < arrlen - 1)
4788 return last_nonzero + 1;
4790 /* No pattern detected */
4794 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4795 static int nand_get_bits_per_cell(u8 cellinfo)
/* Mask out the cell-type field and shift it down to a plain value. */
4799 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4800 bits >>= NAND_CI_CELLTYPE_SHIFT;
4805 * Many new NAND share similar device ID codes, which represent the size of the
4806 * chip. The rest of the parameters must be decoded according to generic or
4807 * manufacturer-specific "extended ID" decoding patterns.
4809 void nand_decode_ext_id(struct nand_chip *chip)
4811 struct nand_memory_organization *memorg;
4812 struct mtd_info *mtd = nand_to_mtd(chip);
4814 u8 *id_data = chip->id.data;
4816 memorg = nanddev_get_memorg(&chip->base);
4818 /* The 3rd id byte holds MLC / multichip data */
4819 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4820 /* The 4th id byte is the important one */
/* Page size: low two bits of the extended ID select 1K/2K/4K/8K. */
4824 memorg->pagesize = 1024 << (extid & 0x03);
4825 mtd->writesize = memorg->pagesize;
/* OOB size scales with page size (per 512-byte unit). */
4828 memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4829 mtd->oobsize = memorg->oobsize;
4831 /* Calc blocksize. Blocksize is multiples of 64KiB */
4832 memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
4834 mtd->erasesize = (64 * 1024) << (extid & 0x03);
4836 /* Get buswidth information */
4838 chip->options |= NAND_BUSWIDTH_16;
4840 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4843 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4844 * decodes a matching ID table entry and assigns the MTD size parameters for
4847 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4849 struct mtd_info *mtd = nand_to_mtd(chip);
4850 struct nand_memory_organization *memorg;
4852 memorg = nanddev_get_memorg(&chip->base);
/* Geometry comes straight from the static table entry. */
4854 memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4855 mtd->erasesize = type->erasesize;
4856 memorg->pagesize = type->pagesize;
4857 mtd->writesize = memorg->pagesize;
/* Legacy convention: OOB is 1/32 of the page size (16B per 512B page). */
4858 memorg->oobsize = memorg->pagesize / 32;
4859 mtd->oobsize = memorg->oobsize;
4861 /* All legacy ID NAND are small-page, SLC */
4862 memorg->bits_per_cell = 1;
4866 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4867 * heuristic patterns using various detected parameters (e.g., manufacturer,
4868 * page size, cell-type information).
4870 static void nand_decode_bbm_options(struct nand_chip *chip)
4872 struct mtd_info *mtd = nand_to_mtd(chip);
4874 /* Set the bad block position */
/* Large-page or 16-bit chips use the large-page marker offset. */
4875 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4876 chip->badblockpos = NAND_BBM_POS_LARGE;
4878 chip->badblockpos = NAND_BBM_POS_SMALL;
/* A table entry is "full-id" when it carries a multi-byte ID to match on. */
4881 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4883 return type->id_len;
/* Match the chip's raw ID bytes against a full-id table entry; on match,
 * populate geometry, options and ECC requirements from the entry and return
 * true (returns elided in this extract). */
4886 static bool find_full_id_nand(struct nand_chip *chip,
4887 struct nand_flash_dev *type)
4889 struct nand_device *base = &chip->base;
4890 struct nand_ecc_props requirements;
4891 struct mtd_info *mtd = nand_to_mtd(chip);
4892 struct nand_memory_organization *memorg;
4893 u8 *id_data = chip->id.data;
4895 memorg = nanddev_get_memorg(&chip->base);
4897 if (!strncmp(type->id, id_data, type->id_len)) {
4898 memorg->pagesize = type->pagesize;
4899 mtd->writesize = memorg->pagesize;
4900 memorg->pages_per_eraseblock = type->erasesize /
4902 mtd->erasesize = type->erasesize;
4903 memorg->oobsize = type->oobsize;
4904 mtd->oobsize = memorg->oobsize;
4906 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* Blocks per LUN derived from the table's chip size (in MiB). */
4907 memorg->eraseblocks_per_lun =
4908 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4910 memorg->pages_per_eraseblock);
4911 chip->options |= type->options;
4912 requirements.strength = NAND_ECC_STRENGTH(type);
4913 requirements.step_size = NAND_ECC_STEP(type);
4914 nanddev_set_ecc_requirements(base, &requirements);
4916 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4917 if (!chip->parameters.model)
4926 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4927 * compliant and does not have a full-id or legacy-id entry in the nand_ids
4930 static void nand_manufacturer_detect(struct nand_chip *chip)
4933 * Try manufacturer detection if available and use
4934 * nand_decode_ext_id() otherwise.
4936 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4937 chip->manufacturer.desc->ops->detect) {
4938 struct nand_memory_organization *memorg;
4940 memorg = nanddev_get_memorg(&chip->base);
4942 /* The 3rd id byte holds MLC / multichip data */
4943 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4944 chip->manufacturer.desc->ops->detect(chip);
/* No vendor hook: fall back to the generic extended-ID decoder. */
4946 nand_decode_ext_id(chip);
4951 * Manufacturer initialization. This function is called for all NANDs including
4952 * ONFI and JEDEC compliant ones.
4953 * Manufacturer drivers should put all their specific initialization code in
4954 * their ->init() hook.
4956 static int nand_manufacturer_init(struct nand_chip *chip)
/* No vendor init hook registered: nothing to do (success return elided). */
4958 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4959 !chip->manufacturer.desc->ops->init)
4962 return chip->manufacturer.desc->ops->init(chip);
4966 * Manufacturer cleanup. This function is called for all NANDs including
4967 * ONFI and JEDEC compliant ones.
4968 * Manufacturer drivers should put all their specific cleanup code in their
4971 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4973 /* Release manufacturer private data */
4974 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4975 chip->manufacturer.desc->ops->cleanup)
4976 chip->manufacturer.desc->ops->cleanup(chip);
/* Printable manufacturer name, or "Unknown" when no descriptor matched.
 * (Return-type line of this definition is elided in this extract.) */
4980 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4982 return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4986 * Get the flash and manufacturer id and lookup if the type is supported.
4988 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4990 const struct nand_manufacturer_desc *manufacturer_desc;
4991 struct mtd_info *mtd = nand_to_mtd(chip);
4992 struct nand_memory_organization *memorg;
4994 u8 *id_data = chip->id.data;
4999 * Let's start by initializing memorg fields that might be left
5000 * unassigned by the ID-based detection logic.
5002 memorg = nanddev_get_memorg(&chip->base);
5003 memorg->planes_per_lun = 1;
5004 memorg->luns_per_target = 1;
5007 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
5010 ret = nand_reset(chip, 0);
5014 /* Select the device */
5015 nand_select_target(chip, 0);
5017 /* Send the command for reading device ID */
5018 ret = nand_readid_op(chip, 0, id_data, 2);
5022 /* Read manufacturer and device IDs */
5023 maf_id = id_data[0];
5024 dev_id = id_data[1];
5027 * Try again to make sure, as some systems the bus-hold or other
5028 * interface concerns can cause random data which looks like a
5029 * possibly credible NAND flash to appear. If the two results do
5030 * not match, ignore the device completely.
5033 /* Read entire ID string */
5034 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
5038 if (id_data[0] != maf_id || id_data[1] != dev_id) {
5039 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
5040 maf_id, dev_id, id_data[0], id_data[1]);
5044 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
5046 /* Try to identify manufacturer */
5047 manufacturer_desc = nand_get_manufacturer_desc(maf_id);
5048 chip->manufacturer.desc = manufacturer_desc;
/* No caller-supplied table: scan the built-in nand_flash_ids table. */
5051 type = nand_flash_ids;
5054 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
5056 * This is required to make sure initial NAND bus width set by the
5057 * NAND controller driver is coherent with the real NAND bus width
5058 * (extracted by auto-detection code).
5060 busw = chip->options & NAND_BUSWIDTH_16;
5063 * The flag is only set (never cleared), reset it to its default value
5064 * before starting auto-detection.
5066 chip->options &= ~NAND_BUSWIDTH_16;
5068 for (; type->name != NULL; type++) {
5069 if (is_full_id_nand(type)) {
5070 if (find_full_id_nand(chip, type))
5072 } else if (dev_id == type->dev_id) {
/* Not in the static table: probe the self-describing standards. */
5077 if (!type->name || !type->pagesize) {
5078 /* Check if the chip is ONFI compliant */
5079 ret = nand_onfi_detect(chip);
5085 /* Check if the chip is JEDEC compliant */
5086 ret = nand_jedec_detect(chip);
5096 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
5097 if (!chip->parameters.model)
5100 if (!type->pagesize)
5101 nand_manufacturer_detect(chip);
5103 nand_decode_id(chip, type);
5105 /* Get chip options */
5106 chip->options |= type->options;
5108 memorg->eraseblocks_per_lun =
5109 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
5111 memorg->pages_per_eraseblock);
5115 mtd->name = chip->parameters.model;
5117 if (chip->options & NAND_BUSWIDTH_AUTO) {
5118 WARN_ON(busw & NAND_BUSWIDTH_16);
5119 nand_set_defaults(chip);
5120 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
5122 * Check, if buswidth is correct. Hardware drivers should set
5125 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5127 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
5129 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
5130 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
5133 goto free_detect_allocation;
5136 nand_decode_bbm_options(chip);
5138 /* Calculate the address shift from the page size */
5139 chip->page_shift = ffs(mtd->writesize) - 1;
5140 /* Convert chipsize to number of pages per chip -1 */
5141 targetsize = nanddev_target_size(&chip->base);
5142 chip->pagemask = (targetsize >> chip->page_shift) - 1;
5144 chip->bbt_erase_shift = chip->phys_erase_shift =
5145 ffs(mtd->erasesize) - 1;
/* chip_shift: log2 of the target size, handling >4GiB targets in two steps. */
5146 if (targetsize & 0xffffffff)
5147 chip->chip_shift = ffs((unsigned)targetsize) - 1;
5149 chip->chip_shift = ffs((unsigned)(targetsize >> 32));
5150 chip->chip_shift += 32 - 1;
/* More than 64K pages per die needs a third row-address cycle. */
5153 if (chip->chip_shift - chip->page_shift > 16)
5154 chip->options |= NAND_ROW_ADDR_3;
5156 chip->badblockbits = 8;
5158 nand_legacy_adjust_cmdfunc(chip);
5160 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5162 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
5163 chip->parameters.model);
5164 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
5165 (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
5166 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
/* Error path: release the model string allocated above. */
5169 free_detect_allocation:
5170 kfree(chip->parameters.model);
/* Map the legacy DT "nand-ecc-mode" string to the modern engine-type enum. */
5175 static enum nand_ecc_engine_type
5176 of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
5178 enum nand_ecc_legacy_mode {
5184 NAND_ECC_HW_SYNDROME,
5187 const char * const nand_ecc_legacy_modes[] = {
5188 [NAND_ECC_NONE] = "none",
5189 [NAND_ECC_SOFT] = "soft",
5190 [NAND_ECC_SOFT_BCH] = "soft_bch",
5191 [NAND_ECC_HW] = "hw",
5192 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
5193 [NAND_ECC_ON_DIE] = "on-die",
5195 enum nand_ecc_legacy_mode eng_type;
5199 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5201 return NAND_ECC_ENGINE_TYPE_INVALID;
5203 for (eng_type = NAND_ECC_NONE;
5204 eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
5205 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
/* Translate each matched legacy mode (switch scaffolding elided). */
5208 return NAND_ECC_ENGINE_TYPE_NONE;
5210 case NAND_ECC_SOFT_BCH:
5211 return NAND_ECC_ENGINE_TYPE_SOFT;
5213 case NAND_ECC_HW_SYNDROME:
5214 return NAND_ECC_ENGINE_TYPE_ON_HOST;
5215 case NAND_ECC_ON_DIE:
5216 return NAND_ECC_ENGINE_TYPE_ON_DIE;
/* Unrecognized string: signal invalid so callers keep their defaults. */
5223 return NAND_ECC_ENGINE_TYPE_INVALID;
/* Legacy DT parsing: only "hw_syndrome" implies interleaved ECC placement. */
5226 static enum nand_ecc_placement
5227 of_get_rawnand_ecc_placement_legacy(struct device_node *np)
5232 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5234 if (!strcasecmp(pm, "hw_syndrome"))
5235 return NAND_ECC_PLACEMENT_INTERLEAVED;
5238 return NAND_ECC_PLACEMENT_UNKNOWN;
/* Legacy DT parsing: derive the ECC algorithm from "nand-ecc-mode"
 * ("soft" -> Hamming, "soft_bch" -> BCH, anything else -> unknown). */
5241 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
5246 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5248 if (!strcasecmp(pm, "soft"))
5249 return NAND_ECC_ALGO_HAMMING;
5250 else if (!strcasecmp(pm, "soft_bch"))
5251 return NAND_ECC_ALGO_BCH;
5254 return NAND_ECC_ALGO_UNKNOWN;
5257 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
5259 struct device_node *dn = nand_get_flash_node(chip);
5260 struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
5262 if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5263 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
5265 if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
5266 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
5268 if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
5269 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
5272 static int of_get_nand_bus_width(struct nand_chip *chip)
5274 struct device_node *dn = nand_get_flash_node(chip);
5278 ret = of_property_read_u32(dn, "nand-bus-width", &val);
5280 /* Buswidth defaults to 8 if the property does not exist .*/
5286 chip->options |= NAND_BUSWIDTH_16;
5292 static int of_get_nand_secure_regions(struct nand_chip *chip)
5294 struct device_node *dn = nand_get_flash_node(chip);
5295 struct property *prop;
5298 /* Only proceed if the "secure-regions" property is present in DT */
5299 prop = of_find_property(dn, "secure-regions", NULL);
5303 nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
5307 chip->nr_secure_regions = nr_elem / 2;
5308 chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
5310 if (!chip->secure_regions)
5313 for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
5314 of_property_read_u64_index(dn, "secure-regions", j,
5315 &chip->secure_regions[i].offset);
5316 of_property_read_u64_index(dn, "secure-regions", j + 1,
5317 &chip->secure_regions[i].size);
5324 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
5325 * @dev: Device that will be parsed. Also used for managed allocations.
5326 * @cs_array: Array of GPIO desc pointers allocated on success
5327 * @ncs_array: Number of entries in @cs_array updated on success.
5328 * @return 0 on success, an error otherwise.
5330 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
5331 unsigned int *ncs_array)
5333 struct device_node *np = dev->of_node;
5334 struct gpio_desc **descs;
5337 ndescs = of_gpio_named_count(np, "cs-gpios");
5339 dev_dbg(dev, "No valid cs-gpios property\n");
5343 descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
5347 for (i = 0; i < ndescs; i++) {
5348 descs[i] = gpiod_get_index_optional(dev, "cs", i,
5350 if (IS_ERR(descs[i]))
5351 return PTR_ERR(descs[i]);
5354 *ncs_array = ndescs;
5359 EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
5361 static int rawnand_dt_init(struct nand_chip *chip)
5363 struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
5364 struct device_node *dn = nand_get_flash_node(chip);
5370 ret = of_get_nand_bus_width(chip);
5374 if (of_property_read_bool(dn, "nand-is-boot-medium"))
5375 chip->options |= NAND_IS_BOOT_MEDIUM;
5377 if (of_property_read_bool(dn, "nand-on-flash-bbt"))
5378 chip->bbt_options |= NAND_BBT_USE_FLASH;
5380 of_get_nand_ecc_user_config(nand);
5381 of_get_nand_ecc_legacy_user_config(chip);
5384 * If neither the user nor the NAND controller have requested a specific
5385 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
5387 nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
5390 * Use the user requested engine type, unless there is none, in this
5391 * case default to the NAND controller choice, otherwise fallback to
5392 * the raw NAND default one.
5394 if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
5395 chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
5396 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5397 chip->ecc.engine_type = nand->ecc.defaults.engine_type;
5399 chip->ecc.placement = nand->ecc.user_conf.placement;
5400 chip->ecc.algo = nand->ecc.user_conf.algo;
5401 chip->ecc.strength = nand->ecc.user_conf.strength;
5402 chip->ecc.size = nand->ecc.user_conf.step_size;
5408 * nand_scan_ident - Scan for the NAND device
5409 * @chip: NAND chip object
5410 * @maxchips: number of chips to scan for
5411 * @table: alternative NAND ID table
5413 * This is the first phase of the normal nand_scan() function. It reads the
5414 * flash ID and sets up MTD fields accordingly.
5416 * This helper used to be called directly from controller drivers that needed
5417 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5418 * prevented dynamic allocations during this phase which was unconvenient and
5419 * as been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5421 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5422 struct nand_flash_dev *table)
5424 struct mtd_info *mtd = nand_to_mtd(chip);
5425 struct nand_memory_organization *memorg;
5426 int nand_maf_id, nand_dev_id;
5430 memorg = nanddev_get_memorg(&chip->base);
5432 /* Assume all dies are deselected when we enter nand_scan_ident(). */
5435 mutex_init(&chip->lock);
5436 init_waitqueue_head(&chip->resume_wq);
5438 /* Enforce the right timings for reset/detection */
5439 chip->current_interface_config = nand_get_reset_interface_config();
5441 ret = rawnand_dt_init(chip);
5445 if (!mtd->name && mtd->dev.parent)
5446 mtd->name = dev_name(mtd->dev.parent);
5448 /* Set the default functions */
5449 nand_set_defaults(chip);
5451 ret = nand_legacy_check_hooks(chip);
5455 memorg->ntargets = maxchips;
5457 /* Read the flash type */
5458 ret = nand_detect(chip, table);
5460 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5461 pr_warn("No NAND device found\n");
5462 nand_deselect_target(chip);
5466 nand_maf_id = chip->id.data[0];
5467 nand_dev_id = chip->id.data[1];
5469 nand_deselect_target(chip);
5471 /* Check for a chip array */
5472 for (i = 1; i < maxchips; i++) {
5475 /* See comment in nand_get_flash_type for reset */
5476 ret = nand_reset(chip, i);
5480 nand_select_target(chip, i);
5481 /* Send the command for reading device ID */
5482 ret = nand_readid_op(chip, 0, id, sizeof(id));
5485 /* Read manufacturer and device IDs */
5486 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5487 nand_deselect_target(chip);
5490 nand_deselect_target(chip);
5493 pr_info("%d chips detected\n", i);
5495 /* Store the number of chips and calc total size for mtd */
5496 memorg->ntargets = i;
5497 mtd->size = i * nanddev_target_size(&chip->base);
5502 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5504 kfree(chip->parameters.model);
5505 kfree(chip->parameters.onfi);
5508 int rawnand_sw_hamming_init(struct nand_chip *chip)
5510 struct nand_ecc_sw_hamming_conf *engine_conf;
5511 struct nand_device *base = &chip->base;
5514 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5515 base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
5516 base->ecc.user_conf.strength = chip->ecc.strength;
5517 base->ecc.user_conf.step_size = chip->ecc.size;
5519 ret = nand_ecc_sw_hamming_init_ctx(base);
5523 engine_conf = base->ecc.ctx.priv;
5525 if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
5526 engine_conf->sm_order = true;
5528 chip->ecc.size = base->ecc.ctx.conf.step_size;
5529 chip->ecc.strength = base->ecc.ctx.conf.strength;
5530 chip->ecc.total = base->ecc.ctx.total;
5531 chip->ecc.steps = nanddev_get_ecc_nsteps(base);
5532 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
5536 EXPORT_SYMBOL(rawnand_sw_hamming_init);
5538 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
5539 const unsigned char *buf,
5540 unsigned char *code)
5542 struct nand_device *base = &chip->base;
5544 return nand_ecc_sw_hamming_calculate(base, buf, code);
5546 EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
5548 int rawnand_sw_hamming_correct(struct nand_chip *chip,
5550 unsigned char *read_ecc,
5551 unsigned char *calc_ecc)
5553 struct nand_device *base = &chip->base;
5555 return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
5557 EXPORT_SYMBOL(rawnand_sw_hamming_correct);
5559 void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
5561 struct nand_device *base = &chip->base;
5563 nand_ecc_sw_hamming_cleanup_ctx(base);
5565 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
5567 int rawnand_sw_bch_init(struct nand_chip *chip)
5569 struct nand_device *base = &chip->base;
5570 const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
5573 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5574 base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
5575 base->ecc.user_conf.step_size = chip->ecc.size;
5576 base->ecc.user_conf.strength = chip->ecc.strength;
5578 ret = nand_ecc_sw_bch_init_ctx(base);
5582 chip->ecc.size = ecc_conf->step_size;
5583 chip->ecc.strength = ecc_conf->strength;
5584 chip->ecc.total = base->ecc.ctx.total;
5585 chip->ecc.steps = nanddev_get_ecc_nsteps(base);
5586 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
5590 EXPORT_SYMBOL(rawnand_sw_bch_init);
5592 static int rawnand_sw_bch_calculate(struct nand_chip *chip,
5593 const unsigned char *buf,
5594 unsigned char *code)
5596 struct nand_device *base = &chip->base;
5598 return nand_ecc_sw_bch_calculate(base, buf, code);
5601 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
5602 unsigned char *read_ecc, unsigned char *calc_ecc)
5604 struct nand_device *base = &chip->base;
5606 return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
5608 EXPORT_SYMBOL(rawnand_sw_bch_correct);
5610 void rawnand_sw_bch_cleanup(struct nand_chip *chip)
5612 struct nand_device *base = &chip->base;
5614 nand_ecc_sw_bch_cleanup_ctx(base);
5616 EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
5618 static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
5620 struct nand_ecc_ctrl *ecc = &chip->ecc;
5622 switch (ecc->placement) {
5623 case NAND_ECC_PLACEMENT_UNKNOWN:
5624 case NAND_ECC_PLACEMENT_OOB:
5625 /* Use standard hwecc read page function? */
5626 if (!ecc->read_page)
5627 ecc->read_page = nand_read_page_hwecc;
5628 if (!ecc->write_page)
5629 ecc->write_page = nand_write_page_hwecc;
5630 if (!ecc->read_page_raw)
5631 ecc->read_page_raw = nand_read_page_raw;
5632 if (!ecc->write_page_raw)
5633 ecc->write_page_raw = nand_write_page_raw;
5635 ecc->read_oob = nand_read_oob_std;
5636 if (!ecc->write_oob)
5637 ecc->write_oob = nand_write_oob_std;
5638 if (!ecc->read_subpage)
5639 ecc->read_subpage = nand_read_subpage;
5640 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5641 ecc->write_subpage = nand_write_subpage_hwecc;
5644 case NAND_ECC_PLACEMENT_INTERLEAVED:
5645 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5647 ecc->read_page == nand_read_page_hwecc ||
5649 ecc->write_page == nand_write_page_hwecc)) {
5650 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5653 /* Use standard syndrome read/write page function? */
5654 if (!ecc->read_page)
5655 ecc->read_page = nand_read_page_syndrome;
5656 if (!ecc->write_page)
5657 ecc->write_page = nand_write_page_syndrome;
5658 if (!ecc->read_page_raw)
5659 ecc->read_page_raw = nand_read_page_raw_syndrome;
5660 if (!ecc->write_page_raw)
5661 ecc->write_page_raw = nand_write_page_raw_syndrome;
5663 ecc->read_oob = nand_read_oob_syndrome;
5664 if (!ecc->write_oob)
5665 ecc->write_oob = nand_write_oob_syndrome;
5669 pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
5677 static int nand_set_ecc_soft_ops(struct nand_chip *chip)
5679 struct mtd_info *mtd = nand_to_mtd(chip);
5680 struct nand_device *nanddev = mtd_to_nanddev(mtd);
5681 struct nand_ecc_ctrl *ecc = &chip->ecc;
5684 if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
5687 switch (ecc->algo) {
5688 case NAND_ECC_ALGO_HAMMING:
5689 ecc->calculate = rawnand_sw_hamming_calculate;
5690 ecc->correct = rawnand_sw_hamming_correct;
5691 ecc->read_page = nand_read_page_swecc;
5692 ecc->read_subpage = nand_read_subpage;
5693 ecc->write_page = nand_write_page_swecc;
5694 if (!ecc->read_page_raw)
5695 ecc->read_page_raw = nand_read_page_raw;
5696 if (!ecc->write_page_raw)
5697 ecc->write_page_raw = nand_write_page_raw;
5698 ecc->read_oob = nand_read_oob_std;
5699 ecc->write_oob = nand_write_oob_std;
5705 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
5706 ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
5708 ret = rawnand_sw_hamming_init(chip);
5710 WARN(1, "Hamming ECC initialization failed!\n");
5715 case NAND_ECC_ALGO_BCH:
5716 if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
5717 WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
5720 ecc->calculate = rawnand_sw_bch_calculate;
5721 ecc->correct = rawnand_sw_bch_correct;
5722 ecc->read_page = nand_read_page_swecc;
5723 ecc->read_subpage = nand_read_subpage;
5724 ecc->write_page = nand_write_page_swecc;
5725 if (!ecc->read_page_raw)
5726 ecc->read_page_raw = nand_read_page_raw;
5727 if (!ecc->write_page_raw)
5728 ecc->write_page_raw = nand_write_page_raw;
5729 ecc->read_oob = nand_read_oob_std;
5730 ecc->write_oob = nand_write_oob_std;
5733 * We can only maximize ECC config when the default layout is
5734 * used, otherwise we don't know how many bytes can really be
5737 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
5738 mtd->ooblayout != nand_get_large_page_ooblayout())
5739 nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;
5741 ret = rawnand_sw_bch_init(chip);
5743 WARN(1, "BCH ECC initialization failed!\n");
5749 WARN(1, "Unsupported ECC algorithm!\n");
5755 * nand_check_ecc_caps - check the sanity of preset ECC settings
5756 * @chip: nand chip info structure
5757 * @caps: ECC caps info structure
5758 * @oobavail: OOB size that the ECC engine can use
5760 * When ECC step size and strength are already set, check if they are supported
5761 * by the controller and the calculated ECC bytes fit within the chip's OOB.
5762 * On success, the calculated ECC bytes is set.
5765 nand_check_ecc_caps(struct nand_chip *chip,
5766 const struct nand_ecc_caps *caps, int oobavail)
5768 struct mtd_info *mtd = nand_to_mtd(chip);
5769 const struct nand_ecc_step_info *stepinfo;
5770 int preset_step = chip->ecc.size;
5771 int preset_strength = chip->ecc.strength;
5772 int ecc_bytes, nsteps = mtd->writesize / preset_step;
5775 for (i = 0; i < caps->nstepinfos; i++) {
5776 stepinfo = &caps->stepinfos[i];
5778 if (stepinfo->stepsize != preset_step)
5781 for (j = 0; j < stepinfo->nstrengths; j++) {
5782 if (stepinfo->strengths[j] != preset_strength)
5785 ecc_bytes = caps->calc_ecc_bytes(preset_step,
5787 if (WARN_ON_ONCE(ecc_bytes < 0))
5790 if (ecc_bytes * nsteps > oobavail) {
5791 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5792 preset_step, preset_strength);
5796 chip->ecc.bytes = ecc_bytes;
5802 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5803 preset_step, preset_strength);
5809 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5810 * @chip: nand chip info structure
5811 * @caps: ECC engine caps info structure
5812 * @oobavail: OOB size that the ECC engine can use
5814 * If a chip's ECC requirement is provided, try to meet it with the least
5815 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5816 * On success, the chosen ECC settings are set.
5819 nand_match_ecc_req(struct nand_chip *chip,
5820 const struct nand_ecc_caps *caps, int oobavail)
5822 const struct nand_ecc_props *requirements =
5823 nanddev_get_ecc_requirements(&chip->base);
5824 struct mtd_info *mtd = nand_to_mtd(chip);
5825 const struct nand_ecc_step_info *stepinfo;
5826 int req_step = requirements->step_size;
5827 int req_strength = requirements->strength;
5828 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5829 int best_step, best_strength, best_ecc_bytes;
5830 int best_ecc_bytes_total = INT_MAX;
5833 /* No information provided by the NAND chip */
5834 if (!req_step || !req_strength)
5837 /* number of correctable bits the chip requires in a page */
5838 req_corr = mtd->writesize / req_step * req_strength;
5840 for (i = 0; i < caps->nstepinfos; i++) {
5841 stepinfo = &caps->stepinfos[i];
5842 step_size = stepinfo->stepsize;
5844 for (j = 0; j < stepinfo->nstrengths; j++) {
5845 strength = stepinfo->strengths[j];
5848 * If both step size and strength are smaller than the
5849 * chip's requirement, it is not easy to compare the
5850 * resulted reliability.
5852 if (step_size < req_step && strength < req_strength)
5855 if (mtd->writesize % step_size)
5858 nsteps = mtd->writesize / step_size;
5860 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5861 if (WARN_ON_ONCE(ecc_bytes < 0))
5863 ecc_bytes_total = ecc_bytes * nsteps;
5865 if (ecc_bytes_total > oobavail ||
5866 strength * nsteps < req_corr)
5870 * We assume the best is to meet the chip's requrement
5871 * with the least number of ECC bytes.
5873 if (ecc_bytes_total < best_ecc_bytes_total) {
5874 best_ecc_bytes_total = ecc_bytes_total;
5875 best_step = step_size;
5876 best_strength = strength;
5877 best_ecc_bytes = ecc_bytes;
5882 if (best_ecc_bytes_total == INT_MAX)
5885 chip->ecc.size = best_step;
5886 chip->ecc.strength = best_strength;
5887 chip->ecc.bytes = best_ecc_bytes;
5893 * nand_maximize_ecc - choose the max ECC strength available
5894 * @chip: nand chip info structure
5895 * @caps: ECC engine caps info structure
5896 * @oobavail: OOB size that the ECC engine can use
5898 * Choose the max ECC strength that is supported on the controller, and can fit
5899 * within the chip's OOB. On success, the chosen ECC settings are set.
5902 nand_maximize_ecc(struct nand_chip *chip,
5903 const struct nand_ecc_caps *caps, int oobavail)
5905 struct mtd_info *mtd = nand_to_mtd(chip);
5906 const struct nand_ecc_step_info *stepinfo;
5907 int step_size, strength, nsteps, ecc_bytes, corr;
5910 int best_strength, best_ecc_bytes;
5913 for (i = 0; i < caps->nstepinfos; i++) {
5914 stepinfo = &caps->stepinfos[i];
5915 step_size = stepinfo->stepsize;
5917 /* If chip->ecc.size is already set, respect it */
5918 if (chip->ecc.size && step_size != chip->ecc.size)
5921 for (j = 0; j < stepinfo->nstrengths; j++) {
5922 strength = stepinfo->strengths[j];
5924 if (mtd->writesize % step_size)
5927 nsteps = mtd->writesize / step_size;
5929 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5930 if (WARN_ON_ONCE(ecc_bytes < 0))
5933 if (ecc_bytes * nsteps > oobavail)
5936 corr = strength * nsteps;
5939 * If the number of correctable bits is the same,
5940 * bigger step_size has more reliability.
5942 if (corr > best_corr ||
5943 (corr == best_corr && step_size > best_step)) {
5945 best_step = step_size;
5946 best_strength = strength;
5947 best_ecc_bytes = ecc_bytes;
5955 chip->ecc.size = best_step;
5956 chip->ecc.strength = best_strength;
5957 chip->ecc.bytes = best_ecc_bytes;
5963 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5964 * @chip: nand chip info structure
5965 * @caps: ECC engine caps info structure
5966 * @oobavail: OOB size that the ECC engine can use
5968 * Choose the ECC configuration according to following logic.
5970 * 1. If both ECC step size and ECC strength are already set (usually by DT)
5971 * then check if it is supported by this controller.
5972 * 2. If the user provided the nand-ecc-maximize property, then select maximum
5974 * 3. Otherwise, try to match the ECC step size and ECC strength closest
5975 * to the chip's requirement. If available OOB size can't fit the chip
5976 * requirement then fallback to the maximum ECC step size and ECC strength.
5978 * On success, the chosen ECC settings are set.
5980 int nand_ecc_choose_conf(struct nand_chip *chip,
5981 const struct nand_ecc_caps *caps, int oobavail)
5983 struct mtd_info *mtd = nand_to_mtd(chip);
5984 struct nand_device *nanddev = mtd_to_nanddev(mtd);
5986 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5989 if (chip->ecc.size && chip->ecc.strength)
5990 return nand_check_ecc_caps(chip, caps, oobavail);
5992 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5993 return nand_maximize_ecc(chip, caps, oobavail);
5995 if (!nand_match_ecc_req(chip, caps, oobavail))
5998 return nand_maximize_ecc(chip, caps, oobavail);
6000 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
6002 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
6004 struct nand_chip *chip = container_of(nand, struct nand_chip,
6006 unsigned int eb = nanddev_pos_to_row(nand, pos);
6009 eb >>= nand->rowconv.eraseblock_addr_shift;
6011 nand_select_target(chip, pos->target);
6012 ret = nand_erase_op(chip, eb);
6013 nand_deselect_target(chip);
6018 static int rawnand_markbad(struct nand_device *nand,
6019 const struct nand_pos *pos)
6021 struct nand_chip *chip = container_of(nand, struct nand_chip,
6024 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6027 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
6029 struct nand_chip *chip = container_of(nand, struct nand_chip,
6033 nand_select_target(chip, pos->target);
6034 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6035 nand_deselect_target(chip);
6040 static const struct nand_ops rawnand_ops = {
6041 .erase = rawnand_erase,
6042 .markbad = rawnand_markbad,
6043 .isbad = rawnand_isbad,
6047 * nand_scan_tail - Scan for the NAND device
6048 * @chip: NAND chip object
6050 * This is the second phase of the normal nand_scan() function. It fills out
6051 * all the uninitialized function pointers with the defaults and scans for a
6052 * bad block table if appropriate.
6054 static int nand_scan_tail(struct nand_chip *chip)
6056 struct mtd_info *mtd = nand_to_mtd(chip);
6057 struct nand_ecc_ctrl *ecc = &chip->ecc;
6060 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
6061 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
6062 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
6066 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
6067 if (!chip->data_buf)
6071 * FIXME: some NAND manufacturer drivers expect the first die to be
6072 * selected when manufacturer->init() is called. They should be fixed
6073 * to explictly select the relevant die when interacting with the NAND
6076 nand_select_target(chip, 0);
6077 ret = nand_manufacturer_init(chip);
6078 nand_deselect_target(chip);
6082 /* Set the internal oob buffer location, just after the page data */
6083 chip->oob_poi = chip->data_buf + mtd->writesize;
6086 * If no default placement scheme is given, select an appropriate one.
6088 if (!mtd->ooblayout &&
6089 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
6090 ecc->algo == NAND_ECC_ALGO_BCH) &&
6091 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
6092 ecc->algo == NAND_ECC_ALGO_HAMMING)) {
6093 switch (mtd->oobsize) {
6096 mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
6100 mtd_set_ooblayout(mtd,
6101 nand_get_large_page_hamming_ooblayout());
6105 * Expose the whole OOB area to users if ECC_NONE
6106 * is passed. We could do that for all kind of
6107 * ->oobsize, but we must keep the old large/small
6108 * page with ECC layout when ->oobsize <= 128 for
6109 * compatibility reasons.
6111 if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
6112 mtd_set_ooblayout(mtd,
6113 nand_get_large_page_ooblayout());
6117 WARN(1, "No oob scheme defined for oobsize %d\n",
6120 goto err_nand_manuf_cleanup;
6125 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
6126 * selected and we have 256 byte pagesize fallback to software ECC
6129 switch (ecc->engine_type) {
6130 case NAND_ECC_ENGINE_TYPE_ON_HOST:
6131 ret = nand_set_ecc_on_host_ops(chip);
6133 goto err_nand_manuf_cleanup;
6135 if (mtd->writesize >= ecc->size) {
6136 if (!ecc->strength) {
6137 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
6139 goto err_nand_manuf_cleanup;
6143 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
6144 ecc->size, mtd->writesize);
6145 ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
6146 ecc->algo = NAND_ECC_ALGO_HAMMING;
6149 case NAND_ECC_ENGINE_TYPE_SOFT:
6150 ret = nand_set_ecc_soft_ops(chip);
6152 goto err_nand_manuf_cleanup;
6155 case NAND_ECC_ENGINE_TYPE_ON_DIE:
6156 if (!ecc->read_page || !ecc->write_page) {
6157 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
6159 goto err_nand_manuf_cleanup;
6162 ecc->read_oob = nand_read_oob_std;
6163 if (!ecc->write_oob)
6164 ecc->write_oob = nand_write_oob_std;
6167 case NAND_ECC_ENGINE_TYPE_NONE:
6168 pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
6169 ecc->read_page = nand_read_page_raw;
6170 ecc->write_page = nand_write_page_raw;
6171 ecc->read_oob = nand_read_oob_std;
6172 ecc->read_page_raw = nand_read_page_raw;
6173 ecc->write_page_raw = nand_write_page_raw;
6174 ecc->write_oob = nand_write_oob_std;
6175 ecc->size = mtd->writesize;
6181 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
6183 goto err_nand_manuf_cleanup;
6186 if (ecc->correct || ecc->calculate) {
6187 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6188 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6189 if (!ecc->calc_buf || !ecc->code_buf) {
6191 goto err_nand_manuf_cleanup;
6195 /* For many systems, the standard OOB write also works for raw */
6196 if (!ecc->read_oob_raw)
6197 ecc->read_oob_raw = ecc->read_oob;
6198 if (!ecc->write_oob_raw)
6199 ecc->write_oob_raw = ecc->write_oob;
6201 /* propagate ecc info to mtd_info */
6202 mtd->ecc_strength = ecc->strength;
6203 mtd->ecc_step_size = ecc->size;
6206 * Set the number of read / write steps for one page depending on ECC
6210 ecc->steps = mtd->writesize / ecc->size;
6211 if (ecc->steps * ecc->size != mtd->writesize) {
6212 WARN(1, "Invalid ECC parameters\n");
6214 goto err_nand_manuf_cleanup;
6218 ecc->total = ecc->steps * ecc->bytes;
6219 chip->base.ecc.ctx.total = ecc->total;
6222 if (ecc->total > mtd->oobsize) {
6223 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
6225 goto err_nand_manuf_cleanup;
6229 * The number of bytes available for a client to place data into
6230 * the out of band area.
6232 ret = mtd_ooblayout_count_freebytes(mtd);
6236 mtd->oobavail = ret;
6238 /* ECC sanity check: warn if it's too weak */
6239 if (!nand_ecc_is_strong_enough(&chip->base))
6240 pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
6241 mtd->name, chip->ecc.strength, chip->ecc.size,
6242 nanddev_get_ecc_requirements(&chip->base)->strength,
6243 nanddev_get_ecc_requirements(&chip->base)->step_size);
6245 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
6246 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
6247 switch (ecc->steps) {
6249 mtd->subpage_sft = 1;
6254 mtd->subpage_sft = 2;
6258 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
6260 /* Invalidate the pagebuffer reference */
6261 chip->pagecache.page = -1;
6263 /* Large page NAND with SOFT_ECC should support subpage reads */
6264 switch (ecc->engine_type) {
6265 case NAND_ECC_ENGINE_TYPE_SOFT:
6266 if (chip->page_shift > 9)
6267 chip->options |= NAND_SUBPAGE_READ;
6274 ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
6276 goto err_nand_manuf_cleanup;
6278 /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
6279 if (chip->options & NAND_ROM)
6280 mtd->flags = MTD_CAP_ROM;
6282 /* Fill in remaining MTD driver data */
6283 mtd->_erase = nand_erase;
6285 mtd->_unpoint = NULL;
6286 mtd->_panic_write = panic_nand_write;
6287 mtd->_read_oob = nand_read_oob;
6288 mtd->_write_oob = nand_write_oob;
6289 mtd->_sync = nand_sync;
6290 mtd->_lock = nand_lock;
6291 mtd->_unlock = nand_unlock;
6292 mtd->_suspend = nand_suspend;
6293 mtd->_resume = nand_resume;
6294 mtd->_reboot = nand_shutdown;
6295 mtd->_block_isreserved = nand_block_isreserved;
6296 mtd->_block_isbad = nand_block_isbad;
6297 mtd->_block_markbad = nand_block_markbad;
6298 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
6301 * Initialize bitflip_threshold to its default prior scan_bbt() call.
6302 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
6305 if (!mtd->bitflip_threshold)
6306 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
6308 /* Find the fastest data interface for this chip */
6309 ret = nand_choose_interface_config(chip);
6311 goto err_nanddev_cleanup;
6313 /* Enter fastest possible mode on all dies. */
6314 for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
6315 ret = nand_setup_interface(chip, i);
6317 goto err_free_interface_config;
6321 * Look for secure regions in the NAND chip. These regions are supposed
6322 * to be protected by a secure element like Trustzone. So the read/write
6323 * accesses to these regions will be blocked in the runtime by this
6326 ret = of_get_nand_secure_regions(chip);
6328 goto err_free_interface_config;
6330 /* Check, if we should skip the bad block table scan */
6331 if (chip->options & NAND_SKIP_BBTSCAN)
6334 /* Build bad block table */
6335 ret = nand_create_bbt(chip);
6337 goto err_free_secure_regions;
6341 err_free_secure_regions:
6342 kfree(chip->secure_regions);
6344 err_free_interface_config:
6345 kfree(chip->best_interface_config);
6347 err_nanddev_cleanup:
6348 nanddev_cleanup(&chip->base);
6350 err_nand_manuf_cleanup:
6351 nand_manufacturer_cleanup(chip);
6354 kfree(chip->data_buf);
6355 kfree(ecc->code_buf);
6356 kfree(ecc->calc_buf);
6361 static int nand_attach(struct nand_chip *chip)
6363 if (chip->controller->ops && chip->controller->ops->attach_chip)
6364 return chip->controller->ops->attach_chip(chip);
6369 static void nand_detach(struct nand_chip *chip)
6371 if (chip->controller->ops && chip->controller->ops->detach_chip)
6372 chip->controller->ops->detach_chip(chip);
6376 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
6377 * @chip: NAND chip object
6378 * @maxchips: number of chips to scan for.
6379 * @ids: optional flash IDs table
6381 * This fills out all the uninitialized function pointers with the defaults.
6382 * The flash ID is read and the mtd/chip structures are filled with the
6383 * appropriate values.
6385 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
6386 struct nand_flash_dev *ids)
6393 ret = nand_scan_ident(chip, maxchips, ids);
6397 ret = nand_attach(chip);
6401 ret = nand_scan_tail(chip);
6410 nand_scan_ident_cleanup(chip);
6414 EXPORT_SYMBOL(nand_scan_with_ids);
6417 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
6418 * @chip: NAND chip object
6420 void nand_cleanup(struct nand_chip *chip)
6422 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
6423 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
6424 rawnand_sw_hamming_cleanup(chip);
6425 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
6426 rawnand_sw_bch_cleanup(chip);
6429 nanddev_cleanup(&chip->base);
6431 /* Free secure regions data */
6432 kfree(chip->secure_regions);
6434 /* Free bad block table memory */
6436 kfree(chip->data_buf);
6437 kfree(chip->ecc.code_buf);
6438 kfree(chip->ecc.calc_buf);
6440 /* Free bad block descriptor memory */
6441 if (chip->badblock_pattern && chip->badblock_pattern->options
6442 & NAND_BBT_DYNAMICSTRUCT)
6443 kfree(chip->badblock_pattern);
6445 /* Free the data interface */
6446 kfree(chip->best_interface_config);
6448 /* Free manufacturer priv data. */
6449 nand_manufacturer_cleanup(chip);
6451 /* Free controller specific allocations after chip identification */
6454 /* Free identification phase allocations */
6455 nand_scan_ident_cleanup(chip);
6458 EXPORT_SYMBOL_GPL(nand_cleanup);
6460 MODULE_LICENSE("GPL");
6461 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6462 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6463 MODULE_DESCRIPTION("Generic NAND flash driver code");