1 // SPDX-License-Identifier: GPL-2.0-only
4 * This is the generic MTD driver for NAND flash devices. It should be
5 * capable of working with almost all NAND chips currently available.
7 * Additional technical information is available on
8 * http://www.linux-mtd.infradead.org/doc/nand.html
10 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
11 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
14 * David Woodhouse for adding multichip support
16 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
17 * rework for 2K page size chips
20 * Enable cached programming for 2k page size chips
21 * Check, if mtd->ecctype should be set to MTD_ECC_HW
22 * if we have HW ECC support.
23 * BBT table is not serialized, has to be fixed
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/err.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
35 #include <linux/types.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/nand-ecc-sw-hamming.h>
39 #include <linux/mtd/nand-ecc-sw-bch.h>
40 #include <linux/interrupt.h>
41 #include <linux/bitops.h>
43 #include <linux/mtd/partitions.h>
45 #include <linux/of_gpio.h>
46 #include <linux/gpio/consumer.h>
48 #include "internals.h"
/*
 * Map a page index to its (pair, group) pairing info for the distance-3
 * MLC pairing scheme: page 0 and odd pages take pair = (page + 1) / 2;
 * other even pages subtract a distance correction first.
 * NOTE(review): this listing elides several lines (dist initialization,
 * group assignments, braces/returns) — not a complete function body.
 */
50 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
51 struct mtd_pairing_info *info)
53 int lastpage = (mtd->erasesize / mtd->writesize) - 1;
59 if (!page || (page & 1)) {
61 info->pair = (page + 1) / 2;
64 info->pair = (page + 1 - dist) / 2;
/*
 * Inverse of nand_pairing_dist3_get_info(): turn (pair, group) info back
 * into a write-unit (page) index, bounds-checking against the block's
 * page count.
 * NOTE(review): interior lines (returns, page adjustment) are elided in
 * this listing.
 */
70 static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
71 const struct mtd_pairing_info *info)
73 int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
74 int page = info->pair * 2;
77 if (!info->group && !info->pair)
80 if (info->pair == lastpair && info->group)
/* Reject (pair, group) combinations that map past the end of the block */
88 if (page >= mtd->erasesize / mtd->writesize)
/*
 * Distance-3 pairing scheme descriptor, wiring the two helpers above into
 * the generic mtd_pairing_scheme interface.
 * NOTE(review): the .ngroups initializer line appears to be elided here.
 */
94 const struct mtd_pairing_scheme dist3_pairing_scheme = {
96 .get_info = nand_pairing_dist3_get_info,
97 .get_wunit = nand_pairing_dist3_get_wunit,
/*
 * Validate that both @ofs and @len are aligned to the erase-block size
 * (1 << phys_erase_shift); used before erase/lock-style operations.
 * NOTE(review): the error-return statements inside each branch are elided
 * in this listing.
 */
100 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
104 /* Start address must align on block boundary */
105 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
106 pr_debug("%s: unaligned address\n", __func__);
110 /* Length must align on block boundary */
111 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
112 pr_debug("%s: length not block aligned\n", __func__);
120 * nand_extract_bits - Copy unaligned bits from one buffer to another one
121 * @dst: destination buffer
122 * @dst_off: bit offset at which the writing starts
123 * @src: source buffer
124 * @src_off: bit offset at which the reading starts
125 * @nbits: number of bits to copy from @src to @dst
127 * Copy bits from one memory region to another (overlap authorized).
129 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
130 unsigned int src_off, unsigned int nbits)
/*
 * Per-iteration core (loop framing elided in this listing): copy up to
 * n bits, where n is limited by the bits left in the current source byte,
 * the bits left in the current destination byte, and the remaining count.
 */
140 n = min3(8 - dst_off, 8 - src_off, nbits);
/* Extract n bits from src, clear the target bit span in dst, then merge */
142 tmp = (*src >> src_off) & GENMASK(n - 1, 0);
143 *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
144 *dst |= tmp << dst_off;
161 EXPORT_SYMBOL_GPL(nand_extract_bits);
164 * nand_select_target() - Select a NAND target (A.K.A. die)
165 * @chip: NAND chip object
166 * @cs: the CS line to select. Note that this CS id is always from the chip
167 * PoV, not the controller one
169 * Select a NAND target so that further operations executed on @chip go to the
170 * selected NAND target.
172 void nand_select_target(struct nand_chip *chip, unsigned int cs)
175 * cs should always lie between 0 and nanddev_ntargets(), when that's
176 * not the case it's a bug and the caller should be fixed.
178 if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
/* Record the selection, then forward to the legacy hook if present */
183 if (chip->legacy.select_chip)
184 chip->legacy.select_chip(chip, cs);
186 EXPORT_SYMBOL_GPL(nand_select_target);
189 * nand_deselect_target() - Deselect the currently selected target
190 * @chip: NAND chip object
192 * Deselect the currently selected NAND target. The result of operations
193 * executed on @chip after the target has been deselected is undefined.
195 void nand_deselect_target(struct nand_chip *chip)
/* -1 is the conventional "no chip selected" argument to select_chip() */
197 if (chip->legacy.select_chip)
198 chip->legacy.select_chip(chip, -1);
202 EXPORT_SYMBOL_GPL(nand_deselect_target);
205 * nand_release_device - [GENERIC] release chip
206 * @chip: NAND chip object
208 * Release chip lock and wake up anyone waiting on the device.
210 static void nand_release_device(struct nand_chip *chip)
/*
 * Unlock in the reverse order of nand_get_device(): controller first,
 * then the chip.
 */
212 /* Release the controller and the chip */
213 mutex_unlock(&chip->controller->lock);
214 mutex_unlock(&chip->lock);
218 * nand_bbm_get_next_page - Get the next page for bad block markers
219 * @chip: NAND chip object
220 * @page: First page to start checking for bad block marker usage
222 * Returns an integer that corresponds to the page offset within a block, for
223 * a page that is used to store bad block markers. If no more pages are
224 * available, -EINVAL is returned.
226 int nand_bbm_get_next_page(struct nand_chip *chip, int page)
228 struct mtd_info *mtd = nand_to_mtd(chip);
229 int last_page = ((mtd->erasesize - mtd->writesize) >>
230 chip->page_shift) & chip->pagemask;
/* NOTE(review): NAND_BBM_LASTPAGE appears elided from this OR chain */
231 unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
/*
 * Walk the candidate BBM pages in order (first, second, last) and return
 * the first candidate at or after @page whose flag is set. The return
 * statements are elided in this listing.
 */
234 if (page == 0 && !(chip->options & bbm_flags))
236 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
238 if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
240 if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
247 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
248 * @chip: NAND chip object
249 * @ofs: offset from device start
251 * Check, if the block is bad.
253 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
255 int first_page, page_offset;
/* First page of the block containing @ofs, masked to the chip range */
259 first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
260 page_offset = nand_bbm_get_next_page(chip, 0);
/* Read the OOB of every BBM-carrying page until one flags the block bad */
262 while (page_offset >= 0) {
263 res = chip->ecc.read_oob(chip, first_page + page_offset);
267 bad = chip->oob_poi[chip->badblockpos];
/*
 * With 8 badblockbits any non-0xFF marker means bad (fast path);
 * otherwise count set bits against the threshold.
 */
269 if (likely(chip->badblockbits == 8))
272 res = hweight8(bad) < chip->badblockbits;
276 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
283 * nand_region_is_secured() - Check if the region is secured
284 * @chip: NAND chip object
285 * @offset: Offset of the region to check
286 * @size: Size of the region to check
288 * Checks if the region is secured by comparing the offset and size with the
289 * list of secure regions obtained from DT. Returns true if the region is
290 * secured else false.
292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
296 /* Skip touching the secure regions if present */
297 for (i = 0; i < chip->nr_secure_regions; i++) {
298 const struct nand_secure_region *region = &chip->secure_regions[i];
/* No overlap: entirely before or entirely after this secure region */
300 if (offset + size <= region->offset ||
301 offset >= region->offset + region->size)
/* Overlap found — the "continue"/"return true" lines are elided here */
304 pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
305 __func__, offset, offset + size);
/*
 * Check the on-flash bad block marker for the block at @ofs, preferring a
 * driver-specific legacy hook over the generic nand_block_bad() scan.
 * Blocks inside secure regions and chips with NAND_NO_BBM_QUIRK are
 * treated specially (return values elided in this listing).
 */
313 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
315 struct mtd_info *mtd = nand_to_mtd(chip);
317 if (chip->options & NAND_NO_BBM_QUIRK)
320 /* Check if the region is secured */
321 if (nand_region_is_secured(chip, ofs, mtd->erasesize))
/* Expert-analysis mode intentionally bypasses BBM checks (warn once) */
324 if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
327 if (chip->legacy.block_bad)
328 return chip->legacy.block_bad(chip, ofs);
330 return nand_block_bad(chip, ofs);
334 * nand_get_device - [GENERIC] Get chip for selected access
335 * @chip: NAND chip structure
337 * Lock the device and its controller for exclusive access
339 * Return: -EBUSY if the chip has been suspended, 0 otherwise
341 static int nand_get_device(struct nand_chip *chip)
/*
 * Take the chip lock first; if the chip is suspended, back out without
 * touching the controller lock (the -EBUSY return is elided here).
 * Lock order (chip, then controller) mirrors nand_release_device().
 */
343 mutex_lock(&chip->lock);
344 if (chip->suspended) {
345 mutex_unlock(&chip->lock);
348 mutex_lock(&chip->controller->lock);
354 * nand_check_wp - [GENERIC] check if the chip is write protected
355 * @chip: NAND chip object
357 * Check, if the device is write protected. The function expects, that the
358 * device is already selected.
360 static int nand_check_wp(struct nand_chip *chip)
365 /* Broken xD cards report WP despite being writable */
366 if (chip->options & NAND_BROKEN_XD)
369 /* Check the WP bit */
370 ret = nand_status_op(chip, &status)
/* WP bit set means writable: return 0; clear means protected: return 1 */
374 return status & NAND_STATUS_WP ? 0 : 1;
378 * nand_fill_oob - [INTERN] Transfer client buffer to oob
379 * @chip: NAND chip object
380 * @oob: oob data buffer
381 * @len: oob data write length
382 * @ops: oob ops structure
384 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
385 struct mtd_oob_ops *ops)
387 struct mtd_info *mtd = nand_to_mtd(chip);
391 * Initialise to all 0xFF, to avoid the possibility of left over OOB
392 * data from a previous OOB read.
394 memset(chip->oob_poi, 0xff, mtd->oobsize);
/* Dispatch on ops->mode (switch framing elided in this listing) */
398 case MTD_OPS_PLACE_OOB:
/* Raw placement: copy client bytes directly at the requested OOB offset */
400 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
403 case MTD_OPS_AUTO_OOB:
/* Auto placement: scatter client bytes into the free OOB areas */
404 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
416 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
417 * @chip: NAND chip object
418 * @to: offset to write to
419 * @ops: oob operation description structure
421 * NAND write out-of-band.
423 static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
424 struct mtd_oob_ops *ops)
426 struct mtd_info *mtd = nand_to_mtd(chip);
427 int chipnr, page, status, len, ret;
429 pr_debug("%s: to = 0x%08x, len = %i\n",
430 __func__, (unsigned int)to, (int)ops->ooblen);
/* Usable OOB bytes per page for this mode (raw vs. auto layout) */
432 len = mtd_oobavail(mtd, ops);
434 /* Do not allow write past end of page */
435 if ((ops->ooboffs + ops->ooblen) > len) {
436 pr_debug("%s: attempt to write past end of page\n",
441 /* Check if the region is secured */
442 if (nand_region_is_secured(chip, to, ops->ooblen))
/* Which die within a multi-chip array @to falls on */
445 chipnr = (int)(to >> chip->chip_shift);
448 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
449 * of my DiskOnChip 2000 test units) will clear the whole data page too
450 * if we don't do this. I have no clue why, but I seem to have 'fixed'
451 * it in the doc2000 driver in August 1999. dwmw2.
453 ret = nand_reset(chip, chipnr);
457 nand_select_target(chip, chipnr);
459 /* Shift to get page */
460 page = (int)(to >> chip->page_shift);
462 /* Check, if it is write protected */
463 if (nand_check_wp(chip)) {
464 nand_deselect_target(chip);
468 /* Invalidate the page cache, if we write to the cached page */
469 if (page == chip->pagecache.page)
470 chip->pagecache.page = -1;
/* Stage the client OOB bytes into chip->oob_poi per ops->mode */
472 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
474 if (ops->mode == MTD_OPS_RAW)
475 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
477 status = chip->ecc.write_oob(chip, page & chip->pagemask);
479 nand_deselect_target(chip);
/* NOTE(review): the status check/return lines are elided in this listing */
484 ops->oobretlen = ops->ooblen;
490 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
491 * @chip: NAND chip object
492 * @ofs: offset from device start
494 * This is the default implementation, which can be overridden by a hardware
495 * specific driver. It provides the details for writing a bad block marker to a
498 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
500 struct mtd_info *mtd = nand_to_mtd(chip);
501 struct mtd_oob_ops ops;
502 uint8_t buf[2] = { 0, 0 };
503 int ret = 0, res, page_offset;
505 memset(&ops, 0, sizeof(ops));
507 ops.ooboffs = chip->badblockpos;
/*
 * On 16-bit chips the marker must be written as a full aligned word,
 * so round the offset down and write two bytes instead of one.
 */
508 if (chip->options & NAND_BUSWIDTH_16) {
509 ops.ooboffs &= ~0x01;
510 ops.len = ops.ooblen = 2;
512 ops.len = ops.ooblen = 1;
514 ops.mode = MTD_OPS_PLACE_OOB;
516 page_offset = nand_bbm_get_next_page(chip, 0);
/* Write the zeroed marker to every page that can carry a BBM */
518 while (page_offset >= 0) {
519 res = nand_do_write_oob(chip,
520 ofs + (page_offset * mtd->writesize),
526 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
533 * nand_markbad_bbm - mark a block by updating the BBM
534 * @chip: NAND chip object
535 * @ofs: offset of the block to mark bad
537 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
/* Prefer the driver-specific hook; otherwise use the generic OOB writer */
539 if (chip->legacy.block_markbad)
540 return chip->legacy.block_markbad(chip, ofs);
542 return nand_default_block_markbad(chip, ofs);
546 * nand_block_markbad_lowlevel - mark a block bad
547 * @chip: NAND chip object
548 * @ofs: offset from device start
550 * This function performs the generic NAND bad block marking steps (i.e., bad
551 * block table(s) and/or marker(s)). We only allow the hardware driver to
552 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
554 * We try operations in the following order:
556 * (1) erase the affected block, to allow OOB marker to be written cleanly
557 * (2) write bad block marker to OOB area of affected block (unless flag
558 * NAND_BBT_NO_OOB_BBM is present)
561 * Note that we retain the first error encountered in (2) or (3), finish the
562 * procedures, and dump the error in the end.
564 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
566 struct mtd_info *mtd = nand_to_mtd(chip);
/* Steps (1)+(2) only apply when OOB markers are in use for this chip */
569 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
570 struct erase_info einfo;
572 /* Attempt erase before marking OOB */
573 memset(&einfo, 0, sizeof(einfo));
575 einfo.len = 1ULL << chip->phys_erase_shift;
/* Best-effort erase: its result is deliberately ignored */
576 nand_erase_nand(chip, &einfo, 0);
578 /* Write bad block marker to OOB */
579 ret = nand_get_device(chip);
583 ret = nand_markbad_bbm(chip, ofs);
584 nand_release_device(chip);
587 /* Mark block bad in BBT */
589 res = nand_markbad_bbt(chip, ofs);
/* Account the newly bad block in the MTD ECC statistics */
595 mtd->ecc_stats.badblocks++;
601 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
602 * @mtd: MTD device structure
603 * @ofs: offset from device start
605 * Check if the block is marked as reserved.
607 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
609 struct nand_chip *chip = mtd_to_nand(mtd);
/* NOTE(review): the no-BBT early return appears elided in this listing */
613 /* Return info from the table */
614 return nand_isreserved_bbt(chip, ofs);
618 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
619 * @chip: NAND chip object
620 * @ofs: offset from device start
621 * @allowbbt: 1, if its allowed to access the bbt area
623 * Check, if the block is bad. Either by reading the bad block table or
624 * calling of the scan function.
626 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
/* Use the in-memory BBT when available (guard condition elided here) */
628 /* Return info from the table */
630 return nand_isbad_bbt(chip, ofs, allowbbt);
/* No BBT: fall back to reading the on-flash bad block marker */
632 return nand_isbad_bbm(chip, ofs);
636 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
637 * @chip: NAND chip structure
638 * @timeout_ms: Timeout in ms
640 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
641 * If that does not happen within the specified timeout, -ETIMEDOUT is
644 * This helper is intended to be used when the controller does not have access
645 * to the NAND R/B pin.
647 * Be aware that calling this helper from an ->exec_op() implementation means
648 * ->exec_op() must be re-entrant.
650 * Return 0 if the NAND chip is ready, a negative error otherwise.
652 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
654 const struct nand_interface_config *conf;
658 if (!nand_has_exec_op(chip))
661 /* Wait tWB before polling the STATUS reg. */
662 conf = nand_get_interface_config(chip);
663 ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
/* Enter READ_STATUS mode once; subsequent reads return the status byte */
665 ret = nand_status_op(chip, NULL);
670 * +1 below is necessary because if we are now in the last fraction
671 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
672 * small jiffy fraction - possibly leading to false timeout
674 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
/* Poll loop body (do/while framing partly elided in this listing) */
676 ret = nand_read_data_op(chip, &status, sizeof(status), true,
681 if (status & NAND_STATUS_READY)
685 * Typical lowest execution time for a tR on most NANDs is 10us,
686 * use this as polling delay before doing something smarter (ie.
687 * deriving a delay from the timeout value, timeout_ms/ratio).
690 } while (time_before(jiffies, timeout_ms));
693 * We have to exit READ_STATUS mode in order to read real data on the
694 * bus in case the WAITRDY instruction is preceding a DATA_IN
697 nand_exit_status_op(chip);
/* Re-check READY after the loop so a last-instant success is not lost */
702 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
704 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
707 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
708 * @chip: NAND chip structure
709 * @gpiod: GPIO descriptor of R/B pin
710 * @timeout_ms: Timeout in ms
712 * Poll the R/B GPIO pin until it becomes ready. If that does not happen
713 * within the specified timeout, -ETIMEDOUT is returned.
715 * This helper is intended to be used when the controller has access to the
716 * NAND R/B pin over GPIO.
718 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
720 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
721 unsigned long timeout_ms)
725 * Wait until R/B pin indicates chip is ready or timeout occurs.
726 * +1 below is necessary because if we are now in the last fraction
727 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
728 * small jiffy fraction - possibly leading to false timeout.
730 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
/* Poll loop (do/while framing partly elided): high GPIO level == ready */
732 if (gpiod_get_value_cansleep(gpiod))
736 } while (time_before(jiffies, timeout_ms));
/* Final sample after the loop so a last-instant ready is not reported as timeout */
738 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
740 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
743 * panic_nand_wait - [GENERIC] wait until the command is done
744 * @chip: NAND chip structure
747 * Wait for command done. This is a helper function for nand_wait used when
748 * we are in interrupt context. May happen when in panic and trying to write
749 * an oops through mtdoops.
751 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
/* Busy-wait (no sleeping — may run in interrupt/panic context) */
754 for (i = 0; i < timeo; i++) {
755 if (chip->legacy.dev_ready) {
756 if (chip->legacy.dev_ready(chip))
/* No dev_ready hook: poll the STATUS register instead */
762 ret = nand_read_data_op(chip, &status, sizeof(status),
767 if (status & NAND_STATUS_READY)
/*
 * True when the chip implements ONFI SET/GET FEATURES and advertises
 * GET FEATURES support for feature address @addr.
 */
774 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
776 return (chip->parameters.supports_set_get_features &&
777 test_bit(addr, chip->parameters.get_feature_list));
/*
 * True when the chip implements ONFI SET/GET FEATURES and advertises
 * SET FEATURES support for feature address @addr.
 */
780 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
782 return (chip->parameters.supports_set_get_features &&
783 test_bit(addr, chip->parameters.set_feature_list));
787 * nand_reset_interface - Reset data interface and timings
788 * @chip: The NAND chip
789 * @chipnr: Internal die id
791 * Reset the Data interface and timings to ONFI mode 0.
793 * Returns 0 for success or negative error code otherwise.
795 static int nand_reset_interface(struct nand_chip *chip, int chipnr)
797 const struct nand_controller_ops *ops = chip->controller->ops;
/* Nothing to do if the controller cannot reconfigure its interface */
800 if (!nand_controller_can_setup_interface(chip))
804 * The ONFI specification says:
806 * To transition from NV-DDR or NV-DDR2 to the SDR data
807 * interface, the host shall use the Reset (FFh) command
808 * using SDR timing mode 0. A device in any timing mode is
809 * required to recognize Reset (FFh) command issued in SDR
813 * Configure the data interface in SDR mode and set the
814 * timings to timing mode 0.
817 chip->current_interface_config = nand_get_reset_interface_config();
818 ret = ops->setup_interface(chip, chipnr,
819 chip->current_interface_config);
821 pr_err("Failed to configure data interface to SDR timing mode 0\n");
827 * nand_setup_interface - Setup the best data interface and timings
828 * @chip: The NAND chip
829 * @chipnr: Internal die id
831 * Configure what has been reported to be the best data interface and NAND
832 * timings supported by the chip and the driver.
834 * Returns 0 for success or negative error code otherwise.
836 static int nand_setup_interface(struct nand_chip *chip, int chipnr)
838 const struct nand_controller_ops *ops = chip->controller->ops;
839 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
842 if (!nand_controller_can_setup_interface(chip))
846 * A nand_reset_interface() put both the NAND chip and the NAND
847 * controller in timings mode 0. If the default mode for this chip is
848 * also 0, no need to proceed to the change again. Plus, at probe time,
849 * nand_setup_interface() uses ->set/get_features() which would
850 * fail anyway as the parameter page is not available yet.
852 if (!chip->best_interface_config)
/* Encode mode number + interface type into the ONFI feature byte */
855 request = chip->best_interface_config->timings.mode;
856 if (nand_interface_is_sdr(chip->best_interface_config))
857 request |= ONFI_DATA_INTERFACE_SDR;
859 request |= ONFI_DATA_INTERFACE_NVDDR;
860 tmode_param[0] = request;
862 /* Change the mode on the chip side (if supported by the NAND chip) */
863 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
864 nand_select_target(chip, chipnr);
865 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
867 nand_deselect_target(chip);
872 /* Change the mode on the controller side */
873 ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
877 /* Check the mode has been accepted by the chip, if supported */
878 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
879 goto update_interface_config;
/* Read the timing-mode feature back to confirm the chip accepted it */
881 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
882 nand_select_target(chip, chipnr);
883 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
885 nand_deselect_target(chip);
889 if (request != tmode_param[0]) {
890 pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
891 nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
892 chip->best_interface_config->timings.mode);
893 pr_debug("NAND chip would work in %s timing mode %d\n",
894 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
895 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
899 update_interface_config:
900 chip->current_interface_config = chip->best_interface_config;
906 * Fallback to mode 0 if the chip explicitly did not ack the chosen
/* Error path (label elided): reset interface, then reset the die itself */
909 nand_reset_interface(chip, chipnr);
910 nand_select_target(chip, chipnr);
912 nand_deselect_target(chip);
918 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
919 * NAND controller and the NAND chip support
920 * @chip: the NAND chip
921 * @iface: the interface configuration (can eventually be updated)
922 * @spec_timings: specific timings, when not fitting the ONFI specification
924 * If specific timings are provided, use them. Otherwise, retrieve supported
925 * timing modes from ONFI information.
927 int nand_choose_best_sdr_timings(struct nand_chip *chip,
928 struct nand_interface_config *iface,
929 struct nand_sdr_timings *spec_timings)
931 const struct nand_controller_ops *ops = chip->controller->ops;
932 int best_mode = 0, mode, ret;
934 iface->type = NAND_SDR_IFACE;
/* Vendor-specific timings path (guard condition elided in this listing) */
937 iface->timings.sdr = *spec_timings;
938 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
940 /* Verify the controller supports the requested interface */
941 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
944 chip->best_interface_config = iface;
948 /* Fallback to slower modes */
949 best_mode = iface->timings.mode;
950 } else if (chip->parameters.onfi) {
/* Highest SDR mode bit advertised in the ONFI parameter page */
951 best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
/* Try modes from fastest down to 0 until the controller accepts one */
954 for (mode = best_mode; mode >= 0; mode--) {
955 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
957 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
960 chip->best_interface_config = iface;
969 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
970 * NAND controller and the NAND chip support
971 * @chip: the NAND chip
972 * @iface: the interface configuration (can eventually be updated)
973 * @spec_timings: specific timings, when not fitting the ONFI specification
975 * If specific timings are provided, use them. Otherwise, retrieve supported
976 * timing modes from ONFI information.
978 int nand_choose_best_nvddr_timings(struct nand_chip *chip,
979 struct nand_interface_config *iface,
980 struct nand_nvddr_timings *spec_timings)
982 const struct nand_controller_ops *ops = chip->controller->ops;
983 int best_mode = 0, mode, ret;
985 iface->type = NAND_NVDDR_IFACE;
/* Vendor-specific timings path (guard condition elided in this listing) */
988 iface->timings.nvddr = *spec_timings;
989 iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);
991 /* Verify the controller supports the requested interface */
992 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
995 chip->best_interface_config = iface;
999 /* Fallback to slower modes */
1000 best_mode = iface->timings.mode;
1001 } else if (chip->parameters.onfi) {
/* Highest NV-DDR mode bit advertised in the ONFI parameter page */
1002 best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
/* Try modes from fastest down to 0 until the controller accepts one */
1005 for (mode = best_mode; mode >= 0; mode--) {
1006 onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);
1008 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
1011 chip->best_interface_config = iface;
1020 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
1021 * NAND controller and the NAND chip support
1022 * @chip: the NAND chip
1023 * @iface: the interface configuration (can eventually be updated)
1025 * If specific timings are provided, use them. Otherwise, retrieve supported
1026 * timing modes from ONFI information.
1028 static int nand_choose_best_timings(struct nand_chip *chip,
1029 struct nand_interface_config *iface)
1033 /* Try the fastest timings: NV-DDR */
1034 ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
/* On NV-DDR success the early return is elided in this listing */
1038 /* Fallback to SDR timings otherwise */
1039 return nand_choose_best_sdr_timings(chip, iface, NULL);
1043 * nand_choose_interface_config - find the best data interface and timings
1044 * @chip: The NAND chip
1046 * Find the best data interface and NAND timings supported by the chip
1047 * and the driver. Eventually let the NAND manufacturer driver propose his own
1050 * After this function nand_chip->interface_config is initialized with the best
1051 * timing mode available.
1053 * Returns 0 for success or negative error code otherwise.
1055 static int nand_choose_interface_config(struct nand_chip *chip)
1057 struct nand_interface_config *iface;
1060 if (!nand_controller_can_setup_interface(chip))
/* Allocated here; freed on the error path (elided in this listing) */
1063 iface = kzalloc(sizeof(*iface), GFP_KERNEL);
/* Manufacturer drivers may override the generic timing selection */
1067 if (chip->ops.choose_interface_config)
1068 ret = chip->ops.choose_interface_config(chip, iface);
1070 ret = nand_choose_best_timings(chip, iface);
1079 * nand_fill_column_cycles - fill the column cycles of an address
1080 * @chip: The NAND chip
1081 * @addrs: Array of address cycles to fill
1082 * @offset_in_page: The offset in the page
1084 * Fills the first or the first two bytes of the @addrs field depending
1085 * on the NAND bus width and the page size.
1087 * Returns the number of cycles needed to encode the column, or a negative
1088 * error code in case one of the arguments is invalid.
1090 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
1091 unsigned int offset_in_page)
1093 struct mtd_info *mtd = nand_to_mtd(chip);
1095 /* Make sure the offset is less than the actual page size. */
1096 if (offset_in_page > mtd->writesize + mtd->oobsize)
1100 * On small page NANDs, there's a dedicated command to access the OOB
1101 * area, and the column address is relative to the start of the OOB
1102 * area, not the start of the page. Adjust the address accordingly.
1104 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
1105 offset_in_page -= mtd->writesize;
1108 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
1109 * wide, then it must be divided by 2.
1111 if (chip->options & NAND_BUSWIDTH_16) {
/* Odd byte offsets cannot be addressed on a 16-bit bus */
1112 if (WARN_ON(offset_in_page % 2))
1115 offset_in_page /= 2;
1118 addrs[0] = offset_in_page;
1121 * Small page NANDs use 1 cycle for the columns, while large page NANDs
1124 if (mtd->writesize <= 512)
1127 addrs[1] = offset_in_page >> 8;
/*
 * Issue a READ PAGE on a small-page (<= 512B) NAND via ->exec_op():
 * READ0/READ1/READOOB opcode selection by column, 3-4 address cycles,
 * wait-ready, then optional data-in.
 */
1132 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1133 unsigned int offset_in_page, void *buf,
1136 const struct nand_interface_config *conf =
1137 nand_get_interface_config(chip);
1138 struct mtd_info *mtd = nand_to_mtd(chip);
1140 struct nand_op_instr instrs[] = {
1141 NAND_OP_CMD(NAND_CMD_READ0, 0),
1142 NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
1143 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
1144 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1145 NAND_OP_DATA_IN(len, buf, 0),
1147 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1150 /* Drop the DATA_IN instruction if len is set to 0. */
/*
 * Small-page chips split the page into command-addressed halves plus
 * OOB: pick READOOB or READ1 when the column lands past 256/writesize.
 */
1154 if (offset_in_page >= mtd->writesize)
1155 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1156 else if (offset_in_page >= 256 &&
1157 !(chip->options & NAND_BUSWIDTH_16))
1158 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1160 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
/* Row address: addrs[1] (elided here) holds the low page byte */
1165 addrs[2] = page >> 8;
1167 if (chip->options & NAND_ROW_ADDR_3) {
1168 addrs[3] = page >> 16;
1169 instrs[1].ctx.addr.naddrs++;
1172 return nand_exec_op(chip, &op);
/*
 * Issue a READ PAGE on a large-page (> 512B) NAND via ->exec_op():
 * READ0 + 4-5 address cycles + READSTART, wait-ready, optional data-in.
 */
1175 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1176 unsigned int offset_in_page, void *buf,
1179 const struct nand_interface_config *conf =
1180 nand_get_interface_config(chip);
1182 struct nand_op_instr instrs[] = {
1183 NAND_OP_CMD(NAND_CMD_READ0, 0),
1184 NAND_OP_ADDR(4, addrs, 0),
1185 NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
1186 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
1187 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1188 NAND_OP_DATA_IN(len, buf, 0),
1190 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1193 /* Drop the DATA_IN instruction if len is set to 0. */
/* Two column cycles come first; rows follow in addrs[2..] */
1197 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
/* Row address: addrs[2] (elided here) holds the low page byte */
1202 addrs[3] = page >> 8;
1204 if (chip->options & NAND_ROW_ADDR_3) {
1205 addrs[4] = page >> 16;
1206 instrs[1].ctx.addr.naddrs++;
1209 return nand_exec_op(chip, &op);
1213 * nand_read_page_op - Do a READ PAGE operation
1214 * @chip: The NAND chip
1215 * @page: page to read
1216 * @offset_in_page: offset within the page
1217 * @buf: buffer used to store the data
1218 * @len: length of the buffer
1220 * This function issues a READ PAGE operation.
1221 * This function does not select/unselect the CS line.
1223 * Returns 0 on success, a negative error code otherwise.
1225 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1226 unsigned int offset_in_page, void *buf, unsigned int len)
1228 struct mtd_info *mtd = nand_to_mtd(chip);
/* Reject reads that would run past page + OOB */
1233 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1236 if (nand_has_exec_op(chip)) {
/* Dispatch on page size: large-page vs small-page command sequences */
1237 if (mtd->writesize > 512)
1238 return nand_lp_exec_read_page_op(chip, page,
1239 offset_in_page, buf,
1242 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
/* Legacy (non-exec_op) fallback path */
1246 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1248 chip->legacy.read_buf(chip, buf, len);
1252 EXPORT_SYMBOL_GPL(nand_read_page_op);
1255 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1256 * @chip: The NAND chip
1257 * @page: parameter page to read
1258 * @buf: buffer used to store the data
1259 * @len: length of the buffer
1261 * This function issues a READ PARAMETER PAGE operation.
1262 * This function does not select/unselect the CS line.
1264 * Returns 0 on success, a negative error code otherwise.
1266 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1275 if (nand_has_exec_op(chip)) {
1276 const struct nand_interface_config *conf =
1277 nand_get_interface_config(chip);
1278 struct nand_op_instr instrs[] = {
1279 NAND_OP_CMD(NAND_CMD_PARAM, 0),
1280 NAND_OP_ADDR(1, &page,
1281 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1282 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
1283 NAND_COMMON_TIMING_NS(conf, tRR_min)),
/* Parameter pages must be read with 8-bit accesses even on 16-bit chips */
1284 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1286 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1288 /* Drop the DATA_IN instruction if len is set to 0. */
1292 return nand_exec_op(chip, &op);
/* Legacy fallback: byte-by-byte read via read_byte() */
1295 chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1296 for (i = 0; i < len; i++)
1297 p[i] = chip->legacy.read_byte(chip);
1303 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1304 * @chip: The NAND chip
1305 * @offset_in_page: offset within the page
1306 * @buf: buffer used to store the data
1307 * @len: length of the buffer
1308 * @force_8bit: force 8-bit bus access
1310 * This function issues a CHANGE READ COLUMN operation.
1311 * This function does not select/unselect the CS line.
1313 * Returns 0 on success, a negative error code otherwise.
1315 int nand_change_read_column_op(struct nand_chip *chip,
1316 unsigned int offset_in_page, void *buf,
1317 unsigned int len, bool force_8bit)
1319 struct mtd_info *mtd = nand_to_mtd(chip);
/* Reject reads that would run past page + OOB */
1324 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1327 /* Small page NANDs do not support column change. */
1328 if (mtd->writesize <= 512)
1331 if (nand_has_exec_op(chip)) {
1332 const struct nand_interface_config *conf =
1333 nand_get_interface_config(chip);
1335 struct nand_op_instr instrs[] = {
1336 NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1337 NAND_OP_ADDR(2, addrs, 0),
1338 NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1339 NAND_COMMON_TIMING_NS(conf, tCCS_min)),
1340 NAND_OP_DATA_IN(len, buf, 0),
1342 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1345 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1349 /* Drop the DATA_IN instruction if len is set to 0. */
/* Honor @force_8bit on the data-in instruction */
1353 instrs[3].ctx.data.force_8bit = force_8bit;
1355 return nand_exec_op(chip, &op);
/* Legacy fallback */
1358 chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1360 chip->legacy.read_buf(chip, buf, len);
1364 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1367 * nand_read_oob_op - Do a READ OOB operation
1368 * @chip: The NAND chip
1369 * @page: page to read
1370 * @offset_in_oob: offset within the OOB area
1371 * @buf: buffer used to store the data
1372 * @len: length of the buffer
1374 * This function issues a READ OOB operation.
1375 * This function does not select/unselect the CS line.
1377 * Returns 0 on success, a negative error code otherwise.
1379 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1380 unsigned int offset_in_oob, void *buf, unsigned int len)
1382 struct mtd_info *mtd = nand_to_mtd(chip);
/* The request must stay within the OOB area. */
1387 if (offset_in_oob + len > mtd->oobsize)
/*
 * exec_op path: an OOB read is just a page read starting past the data
 * area, so reuse nand_read_page_op() with the column shifted by writesize.
 */
1390 if (nand_has_exec_op(chip))
1391 return nand_read_page_op(chip, page,
1392 mtd->writesize + offset_in_oob,
/* Legacy path: dedicated READOOB command takes an OOB-relative column. */
1395 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1397 chip->legacy.read_buf(chip, buf, len);
1401 EXPORT_SYMBOL_GPL(nand_read_oob_op);
/*
 * Build and run a PROG PAGE sequence through ->exec_op(): [READ0] SEQIN +
 * address cycles + data-out [+ PAGEPROG + wait ready]. The bracketed parts
 * are dropped or adjusted below depending on page size, @prog and @len.
 * NOTE(review): the instruction-dropping statements themselves are elided
 * in this view; only their explanatory comments remain.
 */
1403 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1404 unsigned int offset_in_page, const void *buf,
1405 unsigned int len, bool prog)
1407 const struct nand_interface_config *conf =
1408 nand_get_interface_config(chip);
1409 struct mtd_info *mtd = nand_to_mtd(chip);
1411 struct nand_op_instr instrs[] = {
1413 * The first instruction will be dropped if we're dealing
1414 * with a large page NAND and adjusted if we're dealing
1415 * with a small page NAND and the page offset is > 255.
1417 NAND_OP_CMD(NAND_CMD_READ0, 0),
1418 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1419 NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
1420 NAND_OP_DATA_OUT(len, buf, 0),
1421 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1422 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1423 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
1425 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Column cycles first, then 2 or 3 row-address cycles for the page. */
1426 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1431 addrs[naddrs++] = page;
1432 addrs[naddrs++] = page >> 8;
1433 if (chip->options & NAND_ROW_ADDR_3)
1434 addrs[naddrs++] = page >> 16;
1436 instrs[2].ctx.addr.naddrs = naddrs;
1438 /* Drop the last two instructions if we're not programming the page. */
1441 /* Also drop the DATA_OUT instruction if empty. */
1446 if (mtd->writesize <= 512) {
1448 * Small pages need some more tweaking: we have to adjust the
1449 * first instruction depending on the page offset we're trying
1452 if (offset_in_page >= mtd->writesize)
1453 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1454 else if (offset_in_page >= 256 &&
1455 !(chip->options & NAND_BUSWIDTH_16))
1456 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1459 * Drop the first command if we're dealing with a large page
1466 return nand_exec_op(chip, &op);
1470 * nand_prog_page_begin_op - starts a PROG PAGE operation
1471 * @chip: The NAND chip
1472 * @page: page to write
1473 * @offset_in_page: offset within the page
1474 * @buf: buffer containing the data to write to the page
1475 * @len: length of the buffer
1477 * This function issues the first half of a PROG PAGE operation.
1478 * This function does not select/unselect the CS line.
1480 * Returns 0 on success, a negative error code otherwise.
1482 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1483 unsigned int offset_in_page, const void *buf,
1486 struct mtd_info *mtd = nand_to_mtd(chip);
/* The data to program must fit inside the page data + OOB area. */
1491 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
/* exec_op path: prog=false so the PAGEPROG/wait tail is not issued here. */
1494 if (nand_has_exec_op(chip))
1495 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
/* Legacy path: SEQIN then push the data; PAGEPROG comes from *_end_op(). */
1498 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1501 chip->legacy.write_buf(chip, buf, len);
1505 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1508 * nand_prog_page_end_op - ends a PROG PAGE operation
1509 * @chip: The NAND chip
1511 * This function issues the second half of a PROG PAGE operation.
1512 * This function does not select/unselect the CS line.
1514 * Returns 0 on success, a negative error code otherwise.
1516 int nand_prog_page_end_op(struct nand_chip *chip)
/* exec_op path: PAGEPROG (tWB delay) then wait up to tPROG_max for ready. */
1521 if (nand_has_exec_op(chip)) {
1522 const struct nand_interface_config *conf =
1523 nand_get_interface_config(chip);
1524 struct nand_op_instr instrs[] = {
1525 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1526 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1527 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
1530 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1532 ret = nand_exec_op(chip, &op);
/* Fetch the status register to detect a program failure. */
1536 ret = nand_status_op(chip, &status);
1540 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1541 ret = chip->legacy.waitfunc(chip);
/* Programming failed if the chip reports the FAIL status bit. */
1548 if (status & NAND_STATUS_FAIL)
1553 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1556 * nand_prog_page_op - Do a full PROG PAGE operation
1557 * @chip: The NAND chip
1558 * @page: page to write
1559 * @offset_in_page: offset within the page
1560 * @buf: buffer containing the data to write to the page
1561 * @len: length of the buffer
1563 * This function issues a full PROG PAGE operation.
1564 * This function does not select/unselect the CS line.
1566 * Returns 0 on success, a negative error code otherwise.
1568 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1569 unsigned int offset_in_page, const void *buf,
1572 struct mtd_info *mtd = nand_to_mtd(chip);
/* The data to program must fit inside the page data + OOB area. */
1579 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
/* exec_op path: full sequence (prog=true), then read back the status. */
1582 if (nand_has_exec_op(chip)) {
1583 ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1588 ret = nand_status_op(chip, &status);
/* Legacy path: SEQIN + data + PAGEPROG, then waitfunc for the status. */
1592 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1594 chip->legacy.write_buf(chip, buf, len);
1595 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1596 ret = chip->legacy.waitfunc(chip);
/* Programming failed if the chip reports the FAIL status bit. */
1603 if (status & NAND_STATUS_FAIL)
1608 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1611 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1612 * @chip: The NAND chip
1613 * @offset_in_page: offset within the page
1614 * @buf: buffer containing the data to send to the NAND
1615 * @len: length of the buffer
1616 * @force_8bit: force 8-bit bus access
1618 * This function issues a CHANGE WRITE COLUMN operation.
1619 * This function does not select/unselect the CS line.
1621 * Returns 0 on success, a negative error code otherwise.
1623 int nand_change_write_column_op(struct nand_chip *chip,
1624 unsigned int offset_in_page,
1625 const void *buf, unsigned int len,
1628 struct mtd_info *mtd = nand_to_mtd(chip);
/* The requested range must fit inside the page data + OOB area. */
1633 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1636 /* Small page NANDs do not support column change. */
1637 if (mtd->writesize <= 512)
/* exec_op path: RNDIN + 2 column cycles (tCCS delay), then data out. */
1640 if (nand_has_exec_op(chip)) {
1641 const struct nand_interface_config *conf =
1642 nand_get_interface_config(chip);
1644 struct nand_op_instr instrs[] = {
1645 NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1646 NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
1647 NAND_OP_DATA_OUT(len, buf, 0),
1649 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1652 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1656 instrs[2].ctx.data.force_8bit = force_8bit;
1658 /* Drop the DATA_OUT instruction if len is set to 0. */
1662 return nand_exec_op(chip, &op);
/* Legacy path. */
1665 chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1667 chip->legacy.write_buf(chip, buf, len);
1671 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1674 * nand_readid_op - Do a READID operation
1675 * @chip: The NAND chip
1676 * @addr: address cycle to pass after the READID command
1677 * @buf: buffer used to store the ID
1678 * @len: length of the buffer
1680 * This function sends a READID command and reads back the ID returned by the
1682 * This function does not select/unselect the CS line.
1684 * Returns 0 on success, a negative error code otherwise.
1686 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1690 u8 *id = buf, *ddrbuf = NULL;
1695 if (nand_has_exec_op(chip)) {
1696 const struct nand_interface_config *conf =
1697 nand_get_interface_config(chip);
1698 struct nand_op_instr instrs[] = {
1699 NAND_OP_CMD(NAND_CMD_READID, 0),
1700 NAND_OP_ADDR(1, &addr,
1701 NAND_COMMON_TIMING_NS(conf, tADL_min)),
1702 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1704 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1707 /* READ_ID data bytes are received twice in NV-DDR mode */
/*
 * Double the transfer into a scratch buffer so every other byte can
 * be picked out below. NOTE(review): the kfree(ddrbuf) and the
 * allocation-failure return are elided in this view — confirm in the
 * full source that ddrbuf is freed on all paths.
 */
1708 if (len && nand_interface_is_nvddr(conf)) {
1709 ddrbuf = kzalloc(len * 2, GFP_KERNEL);
1713 instrs[2].ctx.data.len *= 2;
1714 instrs[2].ctx.data.buf.in = ddrbuf;
1717 /* Drop the DATA_IN instruction if len is set to 0. */
1721 ret = nand_exec_op(chip, &op);
/* De-interleave: keep the first byte of each NV-DDR pair. */
1722 if (!ret && len && nand_interface_is_nvddr(conf)) {
1723 for (i = 0; i < len; i++)
1724 id[i] = ddrbuf[i * 2];
/* Legacy path: read the ID byte by byte. */
1732 chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1734 for (i = 0; i < len; i++)
1735 id[i] = chip->legacy.read_byte(chip);
1739 EXPORT_SYMBOL_GPL(nand_readid_op);
1742 * nand_status_op - Do a STATUS operation
1743 * @chip: The NAND chip
1744 * @status: out variable to store the NAND status
1746 * This function sends a STATUS command and reads back the status returned by
1748 * This function does not select/unselect the CS line.
1750 * Returns 0 on success, a negative error code otherwise.
1752 int nand_status_op(struct nand_chip *chip, u8 *status)
1754 if (nand_has_exec_op(chip)) {
1755 const struct nand_interface_config *conf =
1756 nand_get_interface_config(chip);
1758 struct nand_op_instr instrs[] = {
1759 NAND_OP_CMD(NAND_CMD_STATUS,
1760 NAND_COMMON_TIMING_NS(conf, tADL_min)),
1761 NAND_OP_8BIT_DATA_IN(1, status, 0),
1763 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1766 /* The status data byte will be received twice in NV-DDR mode */
/* Redirect the doubled transfer into a small stack buffer. */
1767 if (status && nand_interface_is_nvddr(conf)) {
1768 instrs[1].ctx.data.len *= 2;
1769 instrs[1].ctx.data.buf.in = ddrstatus;
1775 ret = nand_exec_op(chip, &op);
/* Keep only the first of the two NV-DDR status bytes. */
1776 if (!ret && status && nand_interface_is_nvddr(conf))
1777 *status = ddrstatus[0];
/* Legacy path. */
1782 chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1784 *status = chip->legacy.read_byte(chip);
1788 EXPORT_SYMBOL_GPL(nand_status_op);
1791 * nand_exit_status_op - Exit a STATUS operation
1792 * @chip: The NAND chip
1794 * This function sends a READ0 command to cancel the effect of the STATUS
1795 * command to avoid reading only the status until a new read command is sent.
1797 * This function does not select/unselect the CS line.
1799 * Returns 0 on success, a negative error code otherwise.
1801 int nand_exit_status_op(struct nand_chip *chip)
/* A bare READ0 command is enough to leave status-register mode. */
1803 if (nand_has_exec_op(chip)) {
1804 struct nand_op_instr instrs[] = {
1805 NAND_OP_CMD(NAND_CMD_READ0, 0),
1807 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1809 return nand_exec_op(chip, &op);
/* Legacy path. */
1812 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1818 * nand_erase_op - Do an erase operation
1819 * @chip: The NAND chip
1820 * @eraseblock: block to erase
1822 * This function sends an ERASE command and waits for the NAND to be ready
1824 * This function does not select/unselect the CS line.
1826 * Returns 0 on success, a negative error code otherwise.
1828 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
/* Convert the eraseblock index into the first page of that block. */
1830 unsigned int page = eraseblock <<
1831 (chip->phys_erase_shift - chip->page_shift);
1835 if (nand_has_exec_op(chip)) {
1836 const struct nand_interface_config *conf =
1837 nand_get_interface_config(chip);
1838 u8 addrs[3] = { page, page >> 8, page >> 16 };
1839 struct nand_op_instr instrs[] = {
1840 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1841 NAND_OP_ADDR(2, addrs, 0),
/*
 * tWB is a nanosecond-scale delay: every other tWB_max delay in this
 * file uses NAND_COMMON_TIMING_NS (see the param-page, prog-page,
 * get-features and reset ops). The previous NAND_COMMON_TIMING_MS
 * conversion truncated it to 0, so the ready/busy line could be
 * sampled before the chip had asserted busy.
 */
1842 NAND_OP_CMD(NAND_CMD_ERASE2,
1843 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1844 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
1847 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Chips with more than 65536 pages need a third row-address cycle. */
1849 if (chip->options & NAND_ROW_ADDR_3)
1850 instrs[1].ctx.addr.naddrs++;
1852 ret = nand_exec_op(chip, &op);
/* Read the status register to detect an erase failure. */
1856 ret = nand_status_op(chip, &status);
/* Legacy path: ERASE1 + ERASE2 then waitfunc for the status. */
1860 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1861 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1863 ret = chip->legacy.waitfunc(chip);
/* Erase failed if the chip reports the FAIL status bit. */
1870 if (status & NAND_STATUS_FAIL)
1875 EXPORT_SYMBOL_GPL(nand_erase_op);
1878 * nand_set_features_op - Do a SET FEATURES operation
1879 * @chip: The NAND chip
1880 * @feature: feature id
1881 * @data: 4 bytes of data
1883 * This function sends a SET FEATURES command and waits for the NAND to be
1884 * ready before returning.
1885 * This function does not select/unselect the CS line.
1887 * Returns 0 on success, a negative error code otherwise.
1889 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1892 const u8 *params = data;
/*
 * exec_op path: SET_FEATURES + 1 feature-address cycle, the 4 parameter
 * bytes as forced 8-bit data-out, then wait up to tFEAT_max for ready.
 */
1895 if (nand_has_exec_op(chip)) {
1896 const struct nand_interface_config *conf =
1897 nand_get_interface_config(chip);
1898 struct nand_op_instr instrs[] = {
1899 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1900 NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
1902 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1903 NAND_COMMON_TIMING_NS(conf,
1905 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1908 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1910 return nand_exec_op(chip, &op);
/* Legacy path: write the parameter bytes one at a time. */
1913 chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1914 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1915 chip->legacy.write_byte(chip, params[i]);
/* waitfunc() returns the chip status here, not an errno. */
1917 ret = chip->legacy.waitfunc(chip);
1921 if (ret & NAND_STATUS_FAIL)
1928 * nand_get_features_op - Do a GET FEATURES operation
1929 * @chip: The NAND chip
1930 * @feature: feature id
1931 * @data: 4 bytes of data
1933 * This function sends a GET FEATURES command and waits for the NAND to be
1934 * ready before returning.
1935 * This function does not select/unselect the CS line.
1937 * Returns 0 on success, a negative error code otherwise.
1939 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
/* ddrbuf is twice the payload: NV-DDR delivers every byte twice. */
1942 u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
1945 if (nand_has_exec_op(chip)) {
1946 const struct nand_interface_config *conf =
1947 nand_get_interface_config(chip);
1948 struct nand_op_instr instrs[] = {
1949 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1950 NAND_OP_ADDR(1, &feature,
1951 NAND_COMMON_TIMING_NS(conf, tWB_max)),
1952 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1953 NAND_COMMON_TIMING_NS(conf, tRR_min)),
1954 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1957 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1960 /* GET_FEATURE data bytes are received twice in NV-DDR mode */
1961 if (nand_interface_is_nvddr(conf)) {
1962 instrs[3].ctx.data.len *= 2;
1963 instrs[3].ctx.data.buf.in = ddrbuf;
1966 ret = nand_exec_op(chip, &op);
/* De-interleave: keep the first byte of each NV-DDR pair. */
1967 if (nand_interface_is_nvddr(conf)) {
1968 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
1969 params[i] = ddrbuf[i * 2];
/* Legacy path: read the parameter bytes one at a time. */
1975 chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1976 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1977 params[i] = chip->legacy.read_byte(chip);
/*
 * Wait for the ready/busy line with exec_op when available, otherwise fall
 * back to the legacy mechanism: a fixed chip_delay when no dev_ready()
 * callback exists, or polling via nand_wait_ready() when it does.
 */
1982 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1983 unsigned int delay_ns)
1985 if (nand_has_exec_op(chip)) {
1986 struct nand_op_instr instrs[] = {
1987 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1988 PSEC_TO_NSEC(delay_ns)),
1990 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1992 return nand_exec_op(chip, &op);
1995 /* Apply delay or wait for ready/busy pin */
1996 if (!chip->legacy.dev_ready)
1997 udelay(chip->legacy.chip_delay);
1999 nand_wait_ready(chip);
2005 * nand_reset_op - Do a reset operation
2006 * @chip: The NAND chip
2008 * This function sends a RESET command and waits for the NAND to be ready
2010 * This function does not select/unselect the CS line.
2012 * Returns 0 on success, a negative error code otherwise.
2014 int nand_reset_op(struct nand_chip *chip)
/* exec_op path: RESET (tWB delay) then wait up to tRST_max for ready. */
2016 if (nand_has_exec_op(chip)) {
2017 const struct nand_interface_config *conf =
2018 nand_get_interface_config(chip);
2019 struct nand_op_instr instrs[] = {
2020 NAND_OP_CMD(NAND_CMD_RESET,
2021 NAND_COMMON_TIMING_NS(conf, tWB_max)),
2022 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
2025 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2027 return nand_exec_op(chip, &op);
/* Legacy path. */
2030 chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
2034 EXPORT_SYMBOL_GPL(nand_reset_op);
2037 * nand_read_data_op - Read data from the NAND
2038 * @chip: The NAND chip
2039 * @buf: buffer used to store the data
2040 * @len: length of the buffer
2041 * @force_8bit: force 8-bit bus access
2042 * @check_only: do not actually run the command, only checks if the
2043 * controller driver supports it
2045 * This function does a raw data read on the bus. Usually used after launching
2046 * another NAND operation like nand_read_page_op().
2047 * This function does not select/unselect the CS line.
2049 * Returns 0 on success, a negative error code otherwise.
2051 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2052 bool force_8bit, bool check_only)
2057 if (nand_has_exec_op(chip)) {
2058 const struct nand_interface_config *conf =
2059 nand_get_interface_config(chip);
2060 struct nand_op_instr instrs[] = {
2061 NAND_OP_DATA_IN(len, buf, 0),
2063 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2067 instrs[0].ctx.data.force_8bit = force_8bit;
2070 * Parameter payloads (ID, status, features, etc) do not go
2071 * through the same pipeline as regular data, hence the
2072 * force_8bit flag must be set and this also indicates that in
2073 * case NV-DDR timings are being used the data will be received
/*
 * NOTE(review): the kfree(ddrbuf) and the allocation-failure
 * return are elided in this view — confirm in the full source
 * that ddrbuf is freed on all paths.
 */
2076 if (force_8bit && nand_interface_is_nvddr(conf)) {
2077 ddrbuf = kzalloc(len * 2, GFP_KERNEL);
2081 instrs[0].ctx.data.len *= 2;
2082 instrs[0].ctx.data.buf.in = ddrbuf;
/* check_only: validate controller support without touching the bus. */
2086 ret = nand_check_op(chip, &op);
2091 ret = nand_exec_op(chip, &op);
/* De-interleave: keep the first byte of each NV-DDR pair. */
2092 if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
2095 for (i = 0; i < len; i++)
2096 dst[i] = ddrbuf[i * 2];
/* Legacy path: byte-by-byte for forced 8-bit, bulk read_buf otherwise. */
2111 for (i = 0; i < len; i++)
2112 p[i] = chip->legacy.read_byte(chip);
2114 chip->legacy.read_buf(chip, buf, len);
2119 EXPORT_SYMBOL_GPL(nand_read_data_op);
2122 * nand_write_data_op - Write data from the NAND
2123 * @chip: The NAND chip
2124 * @buf: buffer containing the data to send on the bus
2125 * @len: length of the buffer
2126 * @force_8bit: force 8-bit bus access
2128 * This function does a raw data write on the bus. Usually used after launching
2129 * another NAND operation like nand_write_page_begin_op().
2130 * This function does not select/unselect the CS line.
2132 * Returns 0 on success, a negative error code otherwise.
2134 int nand_write_data_op(struct nand_chip *chip, const void *buf,
2135 unsigned int len, bool force_8bit)
/* exec_op path: a single DATA_OUT instruction carries the whole payload. */
2140 if (nand_has_exec_op(chip)) {
2141 struct nand_op_instr instrs[] = {
2142 NAND_OP_DATA_OUT(len, buf, 0),
2144 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2146 instrs[0].ctx.data.force_8bit = force_8bit;
2148 return nand_exec_op(chip, &op);
/* Legacy path: byte-by-byte for forced 8-bit, bulk write_buf otherwise. */
2155 for (i = 0; i < len; i++)
2156 chip->legacy.write_byte(chip, p[i]);
2158 chip->legacy.write_buf(chip, buf, len);
2163 EXPORT_SYMBOL_GPL(nand_write_data_op);
2166 * struct nand_op_parser_ctx - Context used by the parser
2167 * @instrs: array of all the instructions that must be addressed
2168 * @ninstrs: length of the @instrs array
2169 * @subop: Sub-operation to be passed to the NAND controller
2171 * This structure is used by the core to split NAND operations into
2172 * sub-operations that can be handled by the NAND controller.
/* Internal to the parser below; @subop is rewritten as matching advances. */
2174 struct nand_op_parser_ctx {
2175 const struct nand_op_instr *instrs;
2176 unsigned int ninstrs;
2177 struct nand_subop subop;
2181 * nand_op_parser_must_split_instr - Checks if an instruction must be split
2182 * @pat: the parser pattern element that matches @instr
2183 * @instr: pointer to the instruction to check
2184 * @start_offset: this is an in/out parameter. If @instr has already been
2185 * split, then @start_offset is the offset from which to start
2186 * (either an address cycle or an offset in the data buffer).
2187 * Conversely, if the function returns true (ie. instr must be
2188 * split), this parameter is updated to point to the first
2189 * data/address cycle that has not been taken care of.
2191 * Some NAND controllers are limited and cannot send X address cycles with a
2192 * unique operation, or cannot read/write more than Y bytes at the same time.
2193 * In this case, split the instruction that does not fit in a single
2194 * controller-operation into two or more chunks.
2196 * Returns true if the instruction must be split, false otherwise.
2197 * The @start_offset parameter is also updated to the offset at which the next
2198 * bundle of instruction must start (if an address or a data instruction).
2201 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2202 const struct nand_op_instr *instr,
2203 unsigned int *start_offset)
2205 switch (pat->type) {
2206 case NAND_OP_ADDR_INSTR:
/* maxcycles == 0 means "no limit": never split on address cycles. */
2207 if (!pat->ctx.addr.maxcycles)
/* More remaining cycles than the pattern allows: consume a chunk. */
2210 if (instr->ctx.addr.naddrs - *start_offset >
2211 pat->ctx.addr.maxcycles) {
2212 *start_offset += pat->ctx.addr.maxcycles;
2217 case NAND_OP_DATA_IN_INSTR:
2218 case NAND_OP_DATA_OUT_INSTR:
/* maxlen == 0 means "no limit": never split on data length. */
2219 if (!pat->ctx.data.maxlen)
2222 if (instr->ctx.data.len - *start_offset >
2223 pat->ctx.data.maxlen) {
2224 *start_offset += pat->ctx.data.maxlen;
2237 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2238 * remaining in the parser context
2239 * @pat: the pattern to test
2240 * @ctx: the parser context structure to match with the pattern @pat
2242 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2243 * Returns true if this is the case, false ortherwise. When true is returned,
2244 * @ctx->subop is updated with the set of instructions to be passed to the
2245 * controller driver.
2248 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2249 struct nand_op_parser_ctx *ctx)
2251 unsigned int instr_offset = ctx->subop.first_instr_start_off;
2252 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2253 const struct nand_op_instr *instr = ctx->subop.instrs;
2254 unsigned int i, ninstrs;
/* Walk pattern elements and operation instructions in lock-step. */
2256 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2258 * The pattern instruction does not match the operation
2259 * instruction. If the instruction is marked optional in the
2260 * pattern definition, we skip the pattern element and continue
2261 * to the next one. If the element is mandatory, there's no
2262 * match and we can return false directly.
2264 if (instr->type != pat->elems[i].type) {
2265 if (!pat->elems[i].optional)
2272 * Now check the pattern element constraints. If the pattern is
2273 * not able to handle the whole instruction in a single step,
2274 * we have to split it.
2275 * The last_instr_end_off value comes back updated to point to
2276 * the position where we have to split the instruction (the
2277 * start of the next subop chunk).
2279 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2292 * This can happen if all instructions of a pattern are optional.
2293 * Still, if there's not at least one instruction handled by this
2294 * pattern, this is not a match, and we should try the next one (if
2301 * We had a match on the pattern head, but the pattern may be longer
2302 * than the instructions we're asked to execute. We need to make sure
2303 * there's no mandatory elements in the pattern tail.
2305 for (; i < pat->nelems; i++) {
2306 if (!pat->elems[i].optional)
2311 * We have a match: update the subop structure accordingly and return
/* last_instr_end_off is 0 unless the final instruction was split. */
2314 ctx->subop.ninstrs = ninstrs;
2315 ctx->subop.last_instr_end_off = instr_offset;
/*
 * Debug helper: dump all instructions of the operation, marking the range
 * currently selected as ctx->subop. Compiled out (stub version below) when
 * neither DYNAMIC_DEBUG nor DEBUG is enabled.
 */
2320 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2321 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2323 const struct nand_op_instr *instr;
2327 pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
2329 for (i = 0; i < ctx->ninstrs; i++) {
2330 instr = &ctx->instrs[i];
/* The prefix visually brackets the instructions belonging to the subop. */
2332 if (instr == &ctx->subop.instrs[0])
2335 nand_op_trace(prefix, instr);
2337 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2342 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
/*
 * Order two candidate parser contexts: prefer the one covering more
 * instructions, then the one whose split point (last_instr_end_off) is
 * further along. Used to pick the best-matching pattern.
 * NOTE(review): the return-value lines are elided in this view.
 */
2348 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2349 const struct nand_op_parser_ctx *b)
2351 if (a->subop.ninstrs < b->subop.ninstrs)
2353 else if (a->subop.ninstrs > b->subop.ninstrs)
2356 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2358 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2365 * nand_op_parser_exec_op - exec_op parser
2366 * @chip: the NAND chip
2367 * @parser: patterns description provided by the controller driver
2368 * @op: the NAND operation to address
2369 * @check_only: when true, the function only checks if @op can be handled but
2370 * does not execute the operation
2372 * Helper function designed to ease integration of NAND controller drivers that
2373 * only support a limited set of instruction sequences. The supported sequences
2374 * are described in @parser, and the framework takes care of splitting @op into
2375 * multiple sub-operations (if required) and pass them back to the ->exec()
2376 * callback of the matching pattern if @check_only is set to false.
2378 * NAND controller drivers should call this function from their own ->exec_op()
2381 * Returns 0 on success, a negative error code otherwise. A failure can be
2382 * caused by an unsupported operation (none of the supported patterns is able
2383 * to handle the requested operation), or an error returned by one of the
2384 * matching pattern->exec() hook.
2386 int nand_op_parser_exec_op(struct nand_chip *chip,
2387 const struct nand_op_parser *parser,
2388 const struct nand_operation *op, bool check_only)
2390 struct nand_op_parser_ctx ctx = {
2392 .subop.instrs = op->instrs,
2393 .instrs = op->instrs,
2394 .ninstrs = op->ninstrs,
/* Consume the operation one sub-operation at a time. */
2398 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2399 const struct nand_op_parser_pattern *pattern;
2400 struct nand_op_parser_ctx best_ctx;
2401 int ret, best_pattern = -1;
/* Try every pattern and keep the one that covers the most. */
2403 for (i = 0; i < parser->npatterns; i++) {
2404 struct nand_op_parser_ctx test_ctx = ctx;
2406 pattern = &parser->patterns[i];
2407 if (!nand_op_parser_match_pat(pattern, &test_ctx))
2410 if (best_pattern >= 0 &&
2411 nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
2415 best_ctx = test_ctx;
2418 if (best_pattern < 0) {
2419 pr_debug("->exec_op() parser: pattern not found!\n")
2424 nand_op_parser_trace(&ctx);
/* Hand the matched sub-operation to the controller driver. */
2427 pattern = &parser->patterns[best_pattern];
2428 ret = pattern->exec(chip, &ctx.subop);
2434 * Update the context structure by pointing to the start of the
/*
 * When the last instruction was split (last_instr_end_off != 0),
 * re-process it: the next subop resumes inside that instruction.
 */
2437 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2438 if (ctx.subop.last_instr_end_off)
2439 ctx.subop.instrs -= 1;
2441 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2446 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2448 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2450 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2451 instr->type == NAND_OP_DATA_OUT_INSTR);
2454 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2455 unsigned int instr_idx)
2457 return subop && instr_idx < subop->ninstrs;
/*
 * Common helper for the start-offset accessors below.
 * NOTE(review): an early-return branch appears to be elided in this view
 * (only the final return is visible); confirm against the full source.
 */
2460 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2461 unsigned int instr_idx)
2466 return subop->first_instr_start_off;
2470 * nand_subop_get_addr_start_off - Get the start offset in an address array
2471 * @subop: The entire sub-operation
2472 * @instr_idx: Index of the instruction inside the sub-operation
2474 * During driver development, one could be tempted to directly use the
2475 * ->addr.addrs field of address instructions. This is wrong as address
2476 * instructions might be split.
2478 * Given an address instruction, returns the offset of the first cycle to issue.
2480 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2481 unsigned int instr_idx)
/* Misuse (bad index or non-address instruction) is a driver bug: WARN. */
2483 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2484 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2487 return nand_subop_get_start_off(subop, instr_idx);
2489 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2492 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2493 * @subop: The entire sub-operation
2494 * @instr_idx: Index of the instruction inside the sub-operation
2496 * During driver development, one could be tempted to directly use the
2497 * ->addr->naddrs field of a data instruction. This is wrong as instructions
2500 * Given an address instruction, returns the number of address cycle to issue.
2502 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2503 unsigned int instr_idx)
2505 int start_off, end_off;
/* Misuse (bad index or non-address instruction) is a driver bug: WARN. */
2507 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2508 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2511 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
/*
 * A non-zero last_instr_end_off on the final instruction means it was
 * split: stop there instead of at the instruction's full cycle count.
 */
2513 if (instr_idx == subop->ninstrs - 1 &&
2514 subop->last_instr_end_off)
2515 end_off = subop->last_instr_end_off;
2517 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2519 return end_off - start_off;
2521 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2524 * nand_subop_get_data_start_off - Get the start offset in a data array
2525 * @subop: The entire sub-operation
2526 * @instr_idx: Index of the instruction inside the sub-operation
2528 * During driver development, one could be tempted to directly use the
2529 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2530 * instructions might be split.
2532 * Given a data instruction, returns the offset to start from.
2534 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2535 unsigned int instr_idx)
/* Misuse (bad index or non-data instruction) is a driver bug: WARN. */
2537 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2538 !nand_instr_is_data(&subop->instrs[instr_idx])))
2541 return nand_subop_get_start_off(subop, instr_idx);
2543 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2546 * nand_subop_get_data_len - Get the number of bytes to retrieve
2547 * @subop: The entire sub-operation
2548 * @instr_idx: Index of the instruction inside the sub-operation
2550 * During driver development, one could be tempted to directly use the
2551 * ->data->len field of a data instruction. This is wrong as data instructions
2554 * Returns the length of the chunk of data to send/receive.
2556 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2557 unsigned int instr_idx)
2559 int start_off = 0, end_off;
/* Misuse (bad index or non-data instruction) is a driver bug: WARN. */
2561 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2562 !nand_instr_is_data(&subop->instrs[instr_idx])))
2565 start_off = nand_subop_get_data_start_off(subop, instr_idx);
/*
 * A non-zero last_instr_end_off on the final instruction means it was
 * split: stop there instead of at the instruction's full length.
 */
2567 if (instr_idx == subop->ninstrs - 1 &&
2568 subop->last_instr_end_off)
2569 end_off = subop->last_instr_end_off;
2571 end_off = subop->instrs[instr_idx].ctx.data.len;
2573 return end_off - start_off;
2575 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_interface for details), do the reset operation, and apply
 * back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	return nand_setup_interface(chip, chipnr);
}
EXPORT_SYMBOL_GPL(nand_reset);
2616 * nand_get_features - wrapper to perform a GET_FEATURE
2617 * @chip: NAND chip info structure
2618 * @addr: feature address
2619 * @subfeature_param: the subfeature parameters, a four bytes array
2621 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2622 * operation cannot be handled.
2624 int nand_get_features(struct nand_chip *chip, int addr,
2625 u8 *subfeature_param)
2627 if (!nand_supports_get_features(chip, addr))
2630 if (chip->legacy.get_features)
2631 return chip->legacy.get_features(chip, addr, subfeature_param);
2633 return nand_get_features_op(chip, addr, subfeature_param);
2637 * nand_set_features - wrapper to perform a SET_FEATURE
2638 * @chip: NAND chip info structure
2639 * @addr: feature address
2640 * @subfeature_param: the subfeature parameters, a four bytes array
2642 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2643 * operation cannot be handled.
2645 int nand_set_features(struct nand_chip *chip, int addr,
2646 u8 *subfeature_param)
2648 if (!nand_supports_set_features(chip, addr))
2651 if (chip->legacy.set_features)
2652 return chip->legacy.set_features(chip, addr, subfeature_param);
2654 return nand_set_features_op(chip, addr, subfeature_param);
2658 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2659 * @buf: buffer to test
2660 * @len: buffer length
2661 * @bitflips_threshold: maximum number of bitflips
2663 * Check if a buffer contains only 0xff, which means the underlying region
2664 * has been erased and is ready to be programmed.
2665 * The bitflips_threshold specify the maximum number of bitflips before
2666 * considering the region is not erased.
2667 * Note: The logic of this function has been extracted from the memweight
2668 * implementation, except that nand_check_erased_buf function exit before
2669 * testing the whole buffer if the number of bitflips exceed the
2670 * bitflips_threshold value.
2672 * Returns a positive number of bitflips less than or equal to
2673 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2676 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2678 const unsigned char *bitmap = buf;
2682 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2684 weight = hweight8(*bitmap);
2685 bitflips += BITS_PER_BYTE - weight;
2686 if (unlikely(bitflips > bitflips_threshold))
2690 for (; len >= sizeof(long);
2691 len -= sizeof(long), bitmap += sizeof(long)) {
2692 unsigned long d = *((unsigned long *)bitmap);
2695 weight = hweight_long(d);
2696 bitflips += BITS_PER_LONG - weight;
2697 if (unlikely(bitflips > bitflips_threshold))
2701 for (; len > 0; len--, bitmap++) {
2702 weight = hweight8(*bitmap);
2703 bitflips += BITS_PER_BYTE - weight;
2704 if (unlikely(bitflips > bitflips_threshold))
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines will
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expect you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
 *    false negatives.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* The chunk counts as erased: scrub the residual bitflips to 0xff. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2789 * nand_read_page_raw_notsupp - dummy read raw page function
2790 * @chip: nand chip info structure
2791 * @buf: buffer to store read data
2792 * @oob_required: caller requires OOB data read to chip->oob_poi
2793 * @page: page number to read
2795 * Returns -ENOTSUPP unconditionally.
2797 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2798 int oob_required, int page)
2804 * nand_read_page_raw - [INTERN] read raw page data without ecc
2805 * @chip: nand chip info structure
2806 * @buf: buffer to store read data
2807 * @oob_required: caller requires OOB data read to chip->oob_poi
2808 * @page: page number to read
2810 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2812 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2815 struct mtd_info *mtd = nand_to_mtd(chip);
2818 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2823 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2831 EXPORT_SYMBOL(nand_read_page_raw);
2834 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2835 * @chip: NAND chip info structure
2836 * @buf: buffer to store read data
2837 * @oob_required: caller requires OOB data read to chip->oob_poi
2838 * @page: page number to read
2840 * This is a raw page read, ie. without any error detection/correction.
2841 * Monolithic means we are requesting all the relevant data (main plus
2842 * eventually OOB) to be loaded in the NAND cache and sent over the
2843 * bus (from the NAND chip to the NAND controller) in a single
2844 * operation. This is an alternative to nand_read_page_raw(), which
2845 * first reads the main data, and if the OOB data is requested too,
2846 * then reads more data on the bus.
2848 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
2849 int oob_required, int page)
2851 struct mtd_info *mtd = nand_to_mtd(chip);
2852 unsigned int size = mtd->writesize;
2857 size += mtd->oobsize;
2859 if (buf != chip->data_buf)
2860 read_buf = nand_get_data_buf(chip);
2863 ret = nand_read_page_op(chip, page, 0, read_buf, size);
2867 if (buf != chip->data_buf)
2868 memcpy(buf, read_buf, mtd->writesize);
2872 EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2875 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2876 * @chip: nand chip info structure
2877 * @buf: buffer to store read data
2878 * @oob_required: caller requires OOB data read to chip->oob_poi
2879 * @page: page number to read
2881 * We need a special oob layout and handling even when OOB isn't used.
2883 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2884 int oob_required, int page)
2886 struct mtd_info *mtd = nand_to_mtd(chip);
2887 int eccsize = chip->ecc.size;
2888 int eccbytes = chip->ecc.bytes;
2889 uint8_t *oob = chip->oob_poi;
2890 int steps, size, ret;
2892 ret = nand_read_page_op(chip, page, 0, NULL, 0);
2896 for (steps = chip->ecc.steps; steps > 0; steps--) {
2897 ret = nand_read_data_op(chip, buf, eccsize, false, false);
2903 if (chip->ecc.prepad) {
2904 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2909 oob += chip->ecc.prepad;
2912 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
2918 if (chip->ecc.postpad) {
2919 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2924 oob += chip->ecc.postpad;
2928 size = mtd->oobsize - (oob - chip->oob_poi);
2930 ret = nand_read_data_op(chip, oob, size, false, false);
2939 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2940 * @chip: nand chip info structure
2941 * @buf: buffer to store read data
2942 * @oob_required: caller requires OOB data read to chip->oob_poi
2943 * @page: page number to read
2945 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2946 int oob_required, int page)
2948 struct mtd_info *mtd = nand_to_mtd(chip);
2949 int i, eccsize = chip->ecc.size, ret;
2950 int eccbytes = chip->ecc.bytes;
2951 int eccsteps = chip->ecc.steps;
2953 uint8_t *ecc_calc = chip->ecc.calc_buf;
2954 uint8_t *ecc_code = chip->ecc.code_buf;
2955 unsigned int max_bitflips = 0;
2957 chip->ecc.read_page_raw(chip, buf, 1, page);
2959 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2960 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2962 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2967 eccsteps = chip->ecc.steps;
2970 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2973 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2975 mtd->ecc_stats.failed++;
2977 mtd->ecc_stats.corrected += stat;
2978 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2981 return max_bitflips;
2985 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2986 * @chip: nand chip info structure
2987 * @data_offs: offset of requested data within the page
2988 * @readlen: data length
2989 * @bufpoi: buffer to store read data
2990 * @page: page number to read
2992 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2993 uint32_t readlen, uint8_t *bufpoi, int page)
2995 struct mtd_info *mtd = nand_to_mtd(chip);
2996 int start_step, end_step, num_steps, ret;
2998 int data_col_addr, i, gaps = 0;
2999 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
3000 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
3001 int index, section = 0;
3002 unsigned int max_bitflips = 0;
3003 struct mtd_oob_region oobregion = { };
3005 /* Column address within the page aligned to ECC size (256bytes) */
3006 start_step = data_offs / chip->ecc.size;
3007 end_step = (data_offs + readlen - 1) / chip->ecc.size;
3008 num_steps = end_step - start_step + 1;
3009 index = start_step * chip->ecc.bytes;
3011 /* Data size aligned to ECC ecc.size */
3012 datafrag_len = num_steps * chip->ecc.size;
3013 eccfrag_len = num_steps * chip->ecc.bytes;
3015 data_col_addr = start_step * chip->ecc.size;
3016 /* If we read not a page aligned data */
3017 p = bufpoi + data_col_addr;
3018 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
3023 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
3024 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
3027 * The performance is faster if we position offsets according to
3028 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
3030 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
3034 if (oobregion.length < eccfrag_len)
3038 ret = nand_change_read_column_op(chip, mtd->writesize,
3039 chip->oob_poi, mtd->oobsize,
3045 * Send the command to read the particular ECC bytes take care
3046 * about buswidth alignment in read_buf.
3048 aligned_pos = oobregion.offset & ~(busw - 1);
3049 aligned_len = eccfrag_len;
3050 if (oobregion.offset & (busw - 1))
3052 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
3056 ret = nand_change_read_column_op(chip,
3057 mtd->writesize + aligned_pos,
3058 &chip->oob_poi[aligned_pos],
3059 aligned_len, false);
3064 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3065 chip->oob_poi, index, eccfrag_len);
3069 p = bufpoi + data_col_addr;
3070 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3073 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
3074 &chip->ecc.calc_buf[i]);
3075 if (stat == -EBADMSG &&
3076 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3077 /* check for empty pages with bitflips */
3078 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3079 &chip->ecc.code_buf[i],
3082 chip->ecc.strength);
3086 mtd->ecc_stats.failed++;
3088 mtd->ecc_stats.corrected += stat;
3089 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3092 return max_bitflips;
3096 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3097 * @chip: nand chip info structure
3098 * @buf: buffer to store read data
3099 * @oob_required: caller requires OOB data read to chip->oob_poi
3100 * @page: page number to read
3102 * Not for syndrome calculating ECC controllers which need a special oob layout.
3104 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
3105 int oob_required, int page)
3107 struct mtd_info *mtd = nand_to_mtd(chip);
3108 int i, eccsize = chip->ecc.size, ret;
3109 int eccbytes = chip->ecc.bytes;
3110 int eccsteps = chip->ecc.steps;
3112 uint8_t *ecc_calc = chip->ecc.calc_buf;
3113 uint8_t *ecc_code = chip->ecc.code_buf;
3114 unsigned int max_bitflips = 0;
3116 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3120 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3121 chip->ecc.hwctl(chip, NAND_ECC_READ);
3123 ret = nand_read_data_op(chip, p, eccsize, false, false);
3127 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3130 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
3135 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3140 eccsteps = chip->ecc.steps;
3143 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3146 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
3147 if (stat == -EBADMSG &&
3148 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3149 /* check for empty pages with bitflips */
3150 stat = nand_check_erased_ecc_chunk(p, eccsize,
3151 &ecc_code[i], eccbytes,
3153 chip->ecc.strength);
3157 mtd->ecc_stats.failed++;
3159 mtd->ecc_stats.corrected += stat;
3160 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3163 return max_bitflips;
3167 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
3168 * data read from OOB area
3169 * @chip: nand chip info structure
3170 * @buf: buffer to store read data
3171 * @oob_required: caller requires OOB data read to chip->oob_poi
3172 * @page: page number to read
3174 * Hardware ECC for large page chips, which requires the ECC data to be
3175 * extracted from the OOB before the actual data is read.
3177 int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
3178 int oob_required, int page)
3180 struct mtd_info *mtd = nand_to_mtd(chip);
3181 int i, eccsize = chip->ecc.size, ret;
3182 int eccbytes = chip->ecc.bytes;
3183 int eccsteps = chip->ecc.steps;
3185 uint8_t *ecc_code = chip->ecc.code_buf;
3186 unsigned int max_bitflips = 0;
3188 /* Read the OOB area first */
3189 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3193 /* Move read cursor to start of page */
3194 ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
3198 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3203 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3206 chip->ecc.hwctl(chip, NAND_ECC_READ);
3208 ret = nand_read_data_op(chip, p, eccsize, false, false);
3212 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
3213 if (stat == -EBADMSG &&
3214 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3215 /* check for empty pages with bitflips */
3216 stat = nand_check_erased_ecc_chunk(p, eccsize,
3219 chip->ecc.strength);
3223 mtd->ecc_stats.failed++;
3225 mtd->ecc_stats.corrected += stat;
3226 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3229 return max_bitflips;
3231 EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
3234 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3235 * @chip: nand chip info structure
3236 * @buf: buffer to store read data
3237 * @oob_required: caller requires OOB data read to chip->oob_poi
3238 * @page: page number to read
3240 * The hw generator calculates the error syndrome automatically. Therefore we
3241 * need a special oob layout and handling.
3243 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
3244 int oob_required, int page)
3246 struct mtd_info *mtd = nand_to_mtd(chip);
3247 int ret, i, eccsize = chip->ecc.size;
3248 int eccbytes = chip->ecc.bytes;
3249 int eccsteps = chip->ecc.steps;
3250 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3252 uint8_t *oob = chip->oob_poi;
3253 unsigned int max_bitflips = 0;
3255 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3259 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3262 chip->ecc.hwctl(chip, NAND_ECC_READ);
3264 ret = nand_read_data_op(chip, p, eccsize, false, false);
3268 if (chip->ecc.prepad) {
3269 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3274 oob += chip->ecc.prepad;
3277 chip->ecc.hwctl(chip, NAND_ECC_READSYN);
3279 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
3283 stat = chip->ecc.correct(chip, p, oob, NULL);
3287 if (chip->ecc.postpad) {
3288 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3293 oob += chip->ecc.postpad;
3296 if (stat == -EBADMSG &&
3297 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3298 /* check for empty pages with bitflips */
3299 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3303 chip->ecc.strength);
3307 mtd->ecc_stats.failed++;
3309 mtd->ecc_stats.corrected += stat;
3310 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3314 /* Calculate remaining oob bytes */
3315 i = mtd->oobsize - (oob - chip->oob_poi);
3317 ret = nand_read_data_op(chip, oob, i, false, false);
3322 return max_bitflips;
3326 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3327 * @chip: NAND chip object
3328 * @oob: oob destination address
3329 * @ops: oob ops structure
3330 * @len: size of oob to transfer
3332 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3333 struct mtd_oob_ops *ops, size_t len)
3335 struct mtd_info *mtd = nand_to_mtd(chip);
3338 switch (ops->mode) {
3340 case MTD_OPS_PLACE_OOB:
3342 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3345 case MTD_OPS_AUTO_OOB:
3346 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3358 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3359 * @chip: NAND chip object
3360 * @retry_mode: the retry mode to use
3362 * Some vendors supply a special command to shift the Vt threshold, to be used
3363 * when there are too many bitflips in a page (i.e., ECC error). After setting
3364 * a new threshold, the host should retry reading the page.
3366 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3368 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3370 if (retry_mode >= chip->read_retries)
3373 if (!chip->ops.setup_read_retry)
3376 return chip->ops.setup_read_retry(chip, retry_mode);
3379 static void nand_wait_readrdy(struct nand_chip *chip)
3381 const struct nand_interface_config *conf;
3383 if (!(chip->options & NAND_NEED_READRDY))
3386 conf = nand_get_interface_config(chip);
3387 WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
3391 * nand_do_read_ops - [INTERN] Read data with ECC
3392 * @chip: NAND chip object
3393 * @from: offset to read from
3394 * @ops: oob ops structure
3396 * Internal function. Called with chip held.
3398 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3399 struct mtd_oob_ops *ops)
3401 int chipnr, page, realpage, col, bytes, aligned, oob_required;
3402 struct mtd_info *mtd = nand_to_mtd(chip);
3404 uint32_t readlen = ops->len;
3405 uint32_t oobreadlen = ops->ooblen;
3406 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3408 uint8_t *bufpoi, *oob, *buf;
3410 unsigned int max_bitflips = 0;
3412 bool ecc_fail = false;
3414 /* Check if the region is secured */
3415 if (nand_region_is_secured(chip, from, readlen))
3418 chipnr = (int)(from >> chip->chip_shift);
3419 nand_select_target(chip, chipnr);
3421 realpage = (int)(from >> chip->page_shift);
3422 page = realpage & chip->pagemask;
3424 col = (int)(from & (mtd->writesize - 1));
3428 oob_required = oob ? 1 : 0;
3431 struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
3433 bytes = min(mtd->writesize - col, readlen);
3434 aligned = (bytes == mtd->writesize);
3438 else if (chip->options & NAND_USES_DMA)
3439 use_bounce_buf = !virt_addr_valid(buf) ||
3440 !IS_ALIGNED((unsigned long)buf,
3445 /* Is the current page in the buffer? */
3446 if (realpage != chip->pagecache.page || oob) {
3447 bufpoi = use_bounce_buf ? chip->data_buf : buf;
3449 if (use_bounce_buf && aligned)
3450 pr_debug("%s: using read bounce buffer for buf@%p\n",
3455 * Now read the page into the buffer. Absent an error,
3456 * the read methods return max bitflips per ecc step.
3458 if (unlikely(ops->mode == MTD_OPS_RAW))
3459 ret = chip->ecc.read_page_raw(chip, bufpoi,
3462 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3464 ret = chip->ecc.read_subpage(chip, col, bytes,
3467 ret = chip->ecc.read_page(chip, bufpoi,
3468 oob_required, page);
3471 /* Invalidate page cache */
3472 chip->pagecache.page = -1;
3477 * Copy back the data in the initial buffer when reading
3478 * partial pages or when a bounce buffer is required.
3480 if (use_bounce_buf) {
3481 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3482 !(mtd->ecc_stats.failed - ecc_stats.failed) &&
3483 (ops->mode != MTD_OPS_RAW)) {
3484 chip->pagecache.page = realpage;
3485 chip->pagecache.bitflips = ret;
3487 /* Invalidate page cache */
3488 chip->pagecache.page = -1;
3490 memcpy(buf, bufpoi + col, bytes);
3493 if (unlikely(oob)) {
3494 int toread = min(oobreadlen, max_oobsize);
3497 oob = nand_transfer_oob(chip, oob, ops,
3499 oobreadlen -= toread;
3503 nand_wait_readrdy(chip);
3505 if (mtd->ecc_stats.failed - ecc_stats.failed) {
3506 if (retry_mode + 1 < chip->read_retries) {
3508 ret = nand_setup_read_retry(chip,
3513 /* Reset ecc_stats; retry */
3514 mtd->ecc_stats = ecc_stats;
3517 /* No more retry modes; real failure */
3523 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3525 memcpy(buf, chip->data_buf + col, bytes);
3527 max_bitflips = max_t(unsigned int, max_bitflips,
3528 chip->pagecache.bitflips);
3533 /* Reset to retry mode 0 */
3535 ret = nand_setup_read_retry(chip, 0);
3544 /* For subsequent reads align to page boundary */
3546 /* Increment page address */
3549 page = realpage & chip->pagemask;
3550 /* Check, if we cross a chip boundary */
3553 nand_deselect_target(chip);
3554 nand_select_target(chip, chipnr);
3557 nand_deselect_target(chip);
3559 ops->retlen = ops->len - (size_t) readlen;
3561 ops->oobretlen = ops->ooblen - oobreadlen;
3569 return max_bitflips;
3573 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3574 * @chip: nand chip info structure
3575 * @page: page number to read
3577 int nand_read_oob_std(struct nand_chip *chip, int page)
3579 struct mtd_info *mtd = nand_to_mtd(chip);
3581 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3583 EXPORT_SYMBOL(nand_read_oob_std);
3586 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3588 * @chip: nand chip info structure
3589 * @page: page number to read
3591 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3593 struct mtd_info *mtd = nand_to_mtd(chip);
3594 int length = mtd->oobsize;
3595 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3596 int eccsize = chip->ecc.size;
3597 uint8_t *bufpoi = chip->oob_poi;
3598 int i, toread, sndrnd = 0, pos, ret;
3600 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3604 for (i = 0; i < chip->ecc.steps; i++) {
3608 pos = eccsize + i * (eccsize + chunk);
3609 if (mtd->writesize > 512)
3610 ret = nand_change_read_column_op(chip, pos,
3614 ret = nand_read_page_op(chip, page, pos, NULL,
3621 toread = min_t(int, length, chunk);
3623 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3631 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3640 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3641 * @chip: nand chip info structure
3642 * @page: page number to write
3644 int nand_write_oob_std(struct nand_chip *chip, int page)
3646 struct mtd_info *mtd = nand_to_mtd(chip);
3648 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3651 EXPORT_SYMBOL(nand_write_oob_std);
3654 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3655 * with syndrome - only for large page flash
3656 * @chip: nand chip info structure
3657 * @page: page number to write
3659 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3661 struct mtd_info *mtd = nand_to_mtd(chip);
3662 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3663 int eccsize = chip->ecc.size, length = mtd->oobsize;
3664 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3665 const uint8_t *bufpoi = chip->oob_poi;
3668 * data-ecc-data-ecc ... ecc-oob
3670 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3672 if (!chip->ecc.prepad && !chip->ecc.postpad) {
3673 pos = steps * (eccsize + chunk);
3678 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3682 for (i = 0; i < steps; i++) {
3684 if (mtd->writesize <= 512) {
3685 uint32_t fill = 0xFFFFFFFF;
3689 int num = min_t(int, len, 4);
3691 ret = nand_write_data_op(chip, &fill,
3699 pos = eccsize + i * (eccsize + chunk);
3700 ret = nand_change_write_column_op(chip, pos,
3708 len = min_t(int, length, chunk);
3710 ret = nand_write_data_op(chip, bufpoi, len, false);
3718 ret = nand_write_data_op(chip, bufpoi, length, false);
3723 return nand_prog_page_end_op(chip);
3727 * nand_do_read_oob - [INTERN] NAND read out-of-band
3728 * @chip: NAND chip object
3729 * @from: offset to read from
3730 * @ops: oob operations description structure
3732 * NAND read out-of-band data from the spare area.
3734 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3735 struct mtd_oob_ops *ops)
3737 struct mtd_info *mtd = nand_to_mtd(chip);
3738 unsigned int max_bitflips = 0;
3739 int page, realpage, chipnr;
3740 struct mtd_ecc_stats stats;
3741 int readlen = ops->ooblen;
3743 uint8_t *buf = ops->oobbuf;
3746 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3747 __func__, (unsigned long long)from, readlen);
3749 /* Check if the region is secured */
3750 if (nand_region_is_secured(chip, from, readlen))
3753 stats = mtd->ecc_stats;
3755 len = mtd_oobavail(mtd, ops);
3757 chipnr = (int)(from >> chip->chip_shift);
3758 nand_select_target(chip, chipnr);
3760 /* Shift to get page */
3761 realpage = (int)(from >> chip->page_shift);
3762 page = realpage & chip->pagemask;
3765 if (ops->mode == MTD_OPS_RAW)
3766 ret = chip->ecc.read_oob_raw(chip, page);
3768 ret = chip->ecc.read_oob(chip, page);
3773 len = min(len, readlen);
3774 buf = nand_transfer_oob(chip, buf, ops, len);
3776 nand_wait_readrdy(chip);
3778 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3784 /* Increment page address */
3787 page = realpage & chip->pagemask;
3788 /* Check, if we cross a chip boundary */
3791 nand_deselect_target(chip);
3792 nand_select_target(chip, chipnr);
3795 nand_deselect_target(chip);
3797 ops->oobretlen = ops->ooblen - readlen;
3802 if (mtd->ecc_stats.failed - stats.failed)
3805 return max_bitflips;
3809 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3810 * @mtd: MTD device structure
3811 * @from: offset to read from
3812 * @ops: oob operation description structure
3814 * NAND read data and/or out-of-band data.
3816 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3817 struct mtd_oob_ops *ops)
3819 struct nand_chip *chip = mtd_to_nand(mtd);
3824 if (ops->mode != MTD_OPS_PLACE_OOB &&
3825 ops->mode != MTD_OPS_AUTO_OOB &&
3826 ops->mode != MTD_OPS_RAW)
3829 ret = nand_get_device(chip);
3834 ret = nand_do_read_oob(chip, from, ops);
3836 ret = nand_do_read_ops(chip, from, ops);
3838 nand_release_device(chip);
3843 * nand_write_page_raw_notsupp - dummy raw page write function
3844 * @chip: nand chip info structure
3846 * @oob_required: must write chip->oob_poi to OOB
3847 * @page: page number to write
3849 * Returns -ENOTSUPP unconditionally.
3851 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3852 int oob_required, int page)
3858 * nand_write_page_raw - [INTERN] raw page write function
3859 * @chip: nand chip info structure
3861 * @oob_required: must write chip->oob_poi to OOB
3862 * @page: page number to write
3864 * Not for syndrome calculating ECC controllers, which use a special oob layout.
3866 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3867 int oob_required, int page)
3869 struct mtd_info *mtd = nand_to_mtd(chip);
3872 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3877 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3883 return nand_prog_page_end_op(chip);
3885 EXPORT_SYMBOL(nand_write_page_raw);
3888 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3889 * @chip: NAND chip info structure
3890 * @buf: data buffer to write
3891 * @oob_required: must write chip->oob_poi to OOB
3892 * @page: page number to write
3894 * This is a raw page write, ie. without any error detection/correction.
3895 * Monolithic means we are requesting all the relevant data (main plus
3896 * eventually OOB) to be sent over the bus and effectively programmed
3897 * into the NAND chip arrays in a single operation. This is an
3898 * alternative to nand_write_page_raw(), which first sends the main
3899 * data, then eventually send the OOB data by latching more data
3900 * cycles on the NAND bus, and finally sends the program command to
3901 * synchronyze the NAND chip cache.
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	/* When OOB is requested, the single transfer covers main + spare. */
		size += mtd->oobsize;

	/*
	 * Stage the caller's data in the chip's internal buffer so the
	 * transfer is one contiguous run; presumably chip->oob_poi sits
	 * right after data_buf — NOTE(review): confirm buffer layout.
	 */
	if (buf != chip->data_buf) {
		write_buf = nand_get_data_buf(chip);
		memcpy(write_buf, buf, mtd->writesize);

	/* One program operation for the whole (data + optional OOB) run. */
	return nand_prog_page_op(chip, page, 0, write_buf, size);
EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3924 * nand_write_page_raw_syndrome - [INTERN] raw page write function
3925 * @chip: nand chip info structure
3927 * @oob_required: must write chip->oob_poi to OOB
3928 * @page: page number to write
3930 * We need a special oob layout and handling even when ECC isn't checked.
/* Raw write with syndrome (interleaved prepad/ECC/postpad) OOB layout. */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	/* Interleave each data chunk with its prepad/ECC/postpad bytes. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,

			oob += chip->ecc.prepad;

		ret = nand_write_data_op(chip, oob, eccbytes, false);

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,

			oob += chip->ecc.postpad;

	/* Flush whatever OOB bytes remain after the interleaved layout. */
	size = mtd->oobsize - (oob - chip->oob_poi);

	ret = nand_write_data_op(chip, oob, size, false);

	return nand_prog_page_end_op(chip);
3988 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3989 * @chip: nand chip info structure
3991 * @oob_required: must write chip->oob_poi to OOB
3992 * @page: page number to write
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation, one code per ECC step */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Scatter the computed ECC bytes into the OOB layout. */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,

	/* Data + prepared OOB are then written via the raw path. */
	return chip->ecc.write_page_raw(chip, buf, 1, page);
4017 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4018 * @chip: nand chip info structure
4020 * @oob_required: must write chip->oob_poi to OOB
4021 * @page: page number to write
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	/* Arm the hardware ECC engine, stream the chunk, read back ECC. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);

		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the hardware-computed ECC bytes into the OOB layout. */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);

	return nand_prog_page_end_op(chip);
4061 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4062 * @chip: nand chip info structure
4063 * @offset: column address of subpage within the page
4064 * @data_len: data length
4066 * @oob_required: must write chip->oob_poi to OOB
4067 * @page: page number to write
/* HW-ECC write of a subpage span [offset, offset+data_len) within a page. */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	/* First and last ECC steps touched by the requested span. */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this includes masked values (0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);

	return nand_prog_page_end_op(chip);
4131 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
4132 * @chip: nand chip info structure
4134 * @oob_required: must write chip->oob_poi to OOB
4135 * @page: page number to write
4137 * The hw generator calculates the error syndrome automatically. Therefore we
4138 * need a special oob layout and handling.
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	/* Per step: data chunk, optional prepad, HW ECC bytes, postpad. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,

			oob += chip->ecc.prepad;

		/* Hardware generator deposits the syndrome ECC into oob. */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,

			oob += chip->ecc.postpad;

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);

	ret = nand_write_data_op(chip, oob, i, false);

	return nand_prog_page_end_op(chip);
4201 * nand_write_page - write one page
4202 * @chip: NAND chip descriptor
4203 * @offset: address offset within the page
4204 * @data_len: length of actual data to be written
4205 * @buf: the data to write
4206 * @oob_required: must write chip->oob_poi to OOB
4207 * @page: page number to write
4208 * @raw: use _raw version of write_page
/* Dispatch a page write to the raw, subpage or full-page ECC handler. */
static int nand_write_page(struct nand_chip *chip, uint32_t offset,
			   int data_len, const uint8_t *buf, int oob_required,
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status, subpage;

	/* Subpage path only when supported and the span is partial. */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);

		status = chip->ecc.write_page_raw(chip, buf, oob_required,
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
		status = chip->ecc.write_page(chip, buf, oob_required, page);
/*
 * NOTALIGNED() - true when @x is not aligned to the chip's subpage size.
 * The argument is parenthesized so expressions such as (a + b) expand
 * correctly; the mask trick relies on subpagesize being a power of two.
 * Uses 'chip' from the enclosing function's scope.
 */
#define NOTALIGNED(x) ((((x) & (chip->subpagesize - 1))) != 0)
4241 * nand_do_write_ops - [INTERN] NAND write with ECC
4242 * @chip: NAND chip object
4243 * @to: offset to write to
4244 * @ops: oob operations description structure
4246 * NAND write with ECC.
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;
	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int oob_required = oob ? 1 : 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, writelen))

	/* Column offset of the start address inside its page. */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {

		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		/* Partial page write needs the 0xff-padded bounce buffer. */
		int part_pagewr = (column || writelen < mtd->writesize);

		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",

			bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* Pad untouched bytes with 0xff (NAND erased state). */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);

		ret = nand_write_page(chip, column, bytes, wbuf,
				      (ops->mode == MTD_OPS_RAW));

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);

	ops->retlen = ops->len - writelen;
		ops->oobretlen = ops->ooblen;
	nand_deselect_target(chip);
4374 * panic_nand_write - [MTD Interface] NAND write with ECC
4375 * @mtd: MTD device structure
4376 * @to: offset to write to
4377 * @len: number of bytes to write
4378 * @retlen: pointer to variable to store the number of written bytes
4379 * @buf: the data to write
4381 * NAND write with ECC. Used when performing writes in interrupt context, this
4382 * may for example be called by mtdoops when writing an oops while in panic.
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(to >> chip->chip_shift);
	struct mtd_oob_ops ops;

	/* No locking here: we are in panic context, busy-wait instead. */
	nand_select_target(chip, chipnr);

	/* Wait for the device to get ready */
	panic_nand_wait(chip, 400);

	memset(&ops, 0, sizeof(ops));
	ops.datbuf = (uint8_t *)buf;
	ops.mode = MTD_OPS_PLACE_OOB;

	ret = nand_do_write_ops(chip, to, &ops);

	*retlen = ops.retlen;
4409 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4410 * @mtd: MTD device structure
4411 * @to: offset to write to
4412 * @ops: oob operation description structure
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Serialize access to the chip before issuing the write. */
	ret = nand_get_device(chip);

	/* OOB-only vs data(+OOB) writes take different internal paths. */
	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:

		ret = nand_do_write_oob(chip, to, ops);
		ret = nand_do_write_ops(chip, to, ops);

	nand_release_device(chip);
4447 * nand_erase - [MTD Interface] erase block(s)
4448 * @mtd: MTD device structure
4449 * @instr: erase instruction
 * Erase one or more blocks.
/* MTD erase entry point: forwards to nand_erase_nand() with allowbbt=0. */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4459 * nand_erase_nand - [INTERN] erase block(s)
4460 * @chip: NAND chip object
4461 * @instr: erase instruction
4462 * @allowbbt: allow erasing the bbt area
 * Erase one or more blocks.
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
	int page, pages_per_block, ret, chipnr;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	/* Validate alignment/bounds of the requested erase range. */
	if (check_offs_len(chip, instr->addr, instr->len))

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",

	/* Loop through the pages */

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* Erase op takes a block address: page minus in-block bits. */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
			pr_debug("%s: failed erase, page 0x%08x\n",
				((loff_t)page << chip->page_shift);

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
4561 * nand_sync - [MTD Interface] sync
4562 * @mtd: MTD device structure
4564 * Sync is actually a wait for chip ready function.
/* MTD sync hook: acquiring the device lock waits for any pending op. */
static void nand_sync(struct mtd_info *mtd)
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	WARN_ON(nand_get_device(chip));
	/* Release it and go back */
	nand_release_device(chip);
4579 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4580 * @mtd: MTD device structure
4581 * @offs: offset relative to mtd start
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);

	/* Select the NAND device */
	ret = nand_get_device(chip);

	nand_select_target(chip, chipnr);

	/* Query BBT/markers; allowbbt=0 so the BBT area counts as bad. */
	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);
4605 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4606 * @mtd: MTD device structure
4607 * @ofs: offset relative to mtd start
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
	ret = nand_block_isbad(mtd, ofs);

	/* If it was bad already, return success and do nothing */

	/* Otherwise record the bad block (marker and/or BBT update). */
	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4625 * nand_suspend - [MTD Interface] Suspend the NAND flash
4626 * @mtd: MTD device structure
4628 * Returns 0 for success or negative error code otherwise.
static int nand_suspend(struct mtd_info *mtd)
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* chip->lock guards the suspended flag against concurrent resume. */
	mutex_lock(&chip->lock);
	if (chip->ops.suspend)
		ret = chip->ops.suspend(chip);
		chip->suspended = 1;
	mutex_unlock(&chip->lock);
4646 * nand_resume - [MTD Interface] Resume the NAND flash
4647 * @mtd: MTD device structure
static void nand_resume(struct mtd_info *mtd)
	struct nand_chip *chip = mtd_to_nand(mtd);

	mutex_lock(&chip->lock);
	/* Only resume a chip that nand_suspend() actually suspended. */
	if (chip->suspended) {
		if (chip->ops.resume)
			chip->ops.resume(chip);
		chip->suspended = 0;
		pr_err("%s called for a chip which is not in suspended state\n",
	mutex_unlock(&chip->lock);
4666 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4667 * prevent further operations
4668 * @mtd: MTD device structure
/* Shutdown hook; body not visible in this view — presumably suspends the chip. */
static void nand_shutdown(struct mtd_info *mtd)
4676 * nand_lock - [MTD Interface] Lock the NAND flash
4677 * @mtd: MTD device structure
4678 * @ofs: offset byte address
4679 * @len: number of bytes to lock (must be a multiple of block/page size)
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Delegate to the vendor hook; without one, locking is unsupported. */
	if (!chip->ops.lock_area)

	return chip->ops.lock_area(chip, ofs, len);
4692 * nand_unlock - [MTD Interface] Unlock the NAND flash
4693 * @mtd: MTD device structure
4694 * @ofs: offset byte address
4695 * @len: number of bytes to unlock (must be a multiple of block/page size)
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Delegate to the vendor hook; without one, unlocking is unsupported. */
	if (!chip->ops.unlock_area)

	return chip->ops.unlock_area(chip, ofs, len);
4707 /* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);

	nand_legacy_set_defaults(chip);

	/* A zero buf_align would be meaningless; default to byte alignment. */
	if (!chip->buf_align)
		chip->buf_align = 1;
4722 /* Sanitize ONFI strings so we can safely print them */
/* Sanitize ONFI strings so we can safely print them */
void sanitize_string(uint8_t *s, size_t len)
	/* Null terminate */

	/* Remove non printable chars (replace outside the ASCII range) */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)

	/* Remove trailing spaces */
4741 * nand_id_has_period - Check if an ID string has a given wraparound period
4742 * @id_data: the ID string
4743 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
4746 * Check if an ID string is repeated within a given sequence of bytes at
4747 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4748 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4749 * if the repetition has a period of @period; otherwise, returns zero.
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
	/* Every byte must match the byte @period positions earlier. */
	for (i = 0; i < period; i++)
		for (j = i + period; j < arrlen; j += period)
			if (id_data[i] != id_data[j])
4762 * nand_id_len - Get the length of an ID string returned by CMD_READID
4763 * @id_data: the ID string
4764 * @arrlen: the length of the @id_data array
4766 * Returns the length of the ID string, according to known wraparound/trailing
4767 * zero patterns. If no pattern exists, returns the length of the array.
static int nand_id_len(u8 *id_data, int arrlen)
	int last_nonzero, period;

	/* Find last non-zero byte */
	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
		if (id_data[last_nonzero])

	/* All-zero ID string. */
	if (last_nonzero < 0)

	/* Calculate wraparound period */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))

	/* There's a repeated pattern */
	if (period < arrlen)

	/* There are trailing zeros */
	if (last_nonzero < arrlen - 1)
		return last_nonzero + 1;

	/* No pattern detected */
4799 /* Extract the bits of per cell from the 3rd byte of the extended ID */
/* Extract the bits of per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
	/* Isolate the cell-type field, then shift it down to a raw value. */
	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
	bits >>= NAND_CI_CELLTYPE_SHIFT;
4810 * Many new NAND share similar device ID codes, which represent the size of the
4811 * chip. The rest of the parameters must be decoded according to generic or
4812 * manufacturer-specific "extended ID" decoding patterns.
void nand_decode_ext_id(struct nand_chip *chip)
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */

	/* Page size: 1KiB << (extid low bits). */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;

	/* OOB: 8 or 16 bytes per 512 bytes of page data. */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;

	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
	mtd->erasesize = (64 * 1024) << (extid & 0x03);

	/* Get buswidth information */
		chip->options |= NAND_BUSWIDTH_16;
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4848 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4849 * decodes a matching ID table entry and assigns the MTD size parameters for
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	/* Geometry comes straight from the matched ID-table entry. */
	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* Legacy parts carry 16 bytes of OOB per 512-byte page. */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
4871 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4872 * heuristic patterns using various detected parameters (e.g., manufacturer,
4873 * page size, cell-type information).
static void nand_decode_bbm_options(struct nand_chip *chip)
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	/* Large-page or 16-bit chips use the large-page marker offset. */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
		chip->badblockpos = NAND_BBM_POS_SMALL;
/* A table entry with a non-zero id_len is matched by its full ID string. */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
	return type->id_len;
/* Match the chip ID against a full-id table entry and, on match, fill memorg. */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		/* Copy the table geometry into memorg and the mtd view. */
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		/* chipsize is in MiB; convert to eraseblocks per LUN. */
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;

		/* Record the ECC strength/step the part requires. */
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
4931 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4932 * compliant and does not have a full-id or legacy-id entry in the nand_ids
static void nand_manufacturer_detect(struct nand_chip *chip)
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
		/* Fall back to the generic extended-ID decoder. */
		nand_decode_ext_id(chip);
4956 * Manufacturer initialization. This function is called for all NANDs including
4957 * ONFI and JEDEC compliant ones.
4958 * Manufacturer drivers should put all their specific initialization code in
4959 * their ->init() hook.
static int nand_manufacturer_init(struct nand_chip *chip)
	/* Nothing to do when the vendor driver provides no init hook. */
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)

	return chip->manufacturer.desc->ops->init(chip);
4971 * Manufacturer cleanup. This function is called for all NANDs including
4972 * ONFI and JEDEC compliant ones.
4973 * Manufacturer drivers should put all their specific cleanup code in their
static void nand_manufacturer_cleanup(struct nand_chip *chip)
	/* Release manufacturer private data */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->cleanup)
		chip->manufacturer.desc->ops->cleanup(chip);
/* Printable manufacturer name; "Unknown" when no descriptor matched. */
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
	return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4991 * Get the flash and manufacturer id and lookup if the type is supported.
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 */
	ret = nand_reset(chip, 0);

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

		/* No caller-provided table: fall back to the built-in list. */
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
		} else if (dev_id == type->dev_id) {

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)

	/* Legacy entries without a pagesize need ID-based geometry decode. */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
		DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
				   memorg->pages_per_eraseblock);

	mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);

		goto free_detect_allocation;

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* chip_shift: log2 of target size, split across the 32-bit halves. */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;

	/* More than 64K pages per target needs a third row-address cycle. */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);

free_detect_allocation:
	kfree(chip->parameters.model);
/* Map the legacy DT "nand-ecc-mode" string to a nand_ecc_engine_type. */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
	enum nand_ecc_legacy_mode {
		NAND_ECC_HW_SYNDROME,
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	enum nand_ecc_legacy_mode eng_type;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
		return NAND_ECC_ENGINE_TYPE_INVALID;

	/* Linear scan; case-insensitive compare against the legacy names. */
	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
				return NAND_ECC_ENGINE_TYPE_NONE;
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;
			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;

	return NAND_ECC_ENGINE_TYPE_INVALID;
/* Legacy DT: only "hw_syndrome" implies an interleaved ECC placement. */
static enum nand_ecc_placement
of_get_rawnand_ecc_placement_legacy(struct device_node *np)
	err = of_property_read_string(np, "nand-ecc-mode", &pm);
		if (!strcasecmp(pm, "hw_syndrome"))
			return NAND_ECC_PLACEMENT_INTERLEAVED;

	return NAND_ECC_PLACEMENT_UNKNOWN;
/* Legacy DT: derive the software ECC algorithm from "nand-ecc-mode". */
static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
	err = of_property_read_string(np, "nand-ecc-mode", &pm);
		if (!strcasecmp(pm, "soft"))
			return NAND_ECC_ALGO_HAMMING;
		else if (!strcasecmp(pm, "soft_bch"))
			return NAND_ECC_ALGO_BCH;

	return NAND_ECC_ALGO_UNKNOWN;
/*
 * Fill in any still-unset fields of the user ECC configuration
 * (engine type, algorithm, placement) from the legacy DT bindings.
 * Each field is only overwritten when the modern parsing left it at
 * its "unset" sentinel, so new-style properties always win.
 */
5262 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
5264 struct device_node *dn = nand_get_flash_node(chip);
5265 struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
5267 if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5268 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
5270 if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
5271 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
5273 if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
5274 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
/*
 * Read the "nand-bus-width" DT property (8 or 16).
 * NOTE(review): the listing is truncated here — the return paths for the
 * property-missing and property-present cases are not visible; presumably
 * the value read into 'val' is validated and returned. TODO confirm
 * against the full source.
 */
5277 static int of_get_nand_bus_width(struct device_node *np)
5281 if (of_property_read_u32(np, "nand-bus-width", &val))
/* True when the DT requests a flash-based bad block table ("nand-on-flash-bbt"). */
5293 static bool of_get_nand_on_flash_bbt(struct device_node *np)
5295 return of_property_read_bool(np, "nand-on-flash-bbt");
/*
 * Parse the "secure-regions" DT property into chip->secure_regions.
 * The property is a flat list of u64 <offset, size> pairs, hence the
 * element count is halved to get the number of regions and the read
 * index 'j' advances by 2 per region.
 */
5298 static int of_get_nand_secure_regions(struct nand_chip *chip)
5300 struct device_node *dn = nand_get_flash_node(chip);
5301 struct property *prop;
5304 /* Only proceed if the "secure-regions" property is present in DT */
5305 prop = of_find_property(dn, "secure-regions", NULL);
5309 nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
5313 chip->nr_secure_regions = nr_elem / 2;
5314 chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
5316 if (!chip->secure_regions)
5319 for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
5320 of_property_read_u64_index(dn, "secure-regions", j,
5321 &chip->secure_regions[i].offset);
5322 of_property_read_u64_index(dn, "secure-regions", j + 1,
5323 &chip->secure_regions[i].size);
5330 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
5331 * @dev: Device that will be parsed. Also used for managed allocations.
5332 * @cs_array: Array of GPIO desc pointers allocated on success
5333 * @ncs_array: Number of entries in @cs_array updated on success.
5334 * @return 0 on success, an error otherwise.
5336 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
5337 unsigned int *ncs_array)
5339 struct device_node *np = dev->of_node;
5340 struct gpio_desc **descs;
/* Count the "cs-gpios" entries first so the array can be sized exactly. */
5343 ndescs = of_gpio_named_count(np, "cs-gpios");
5345 dev_dbg(dev, "No valid cs-gpios property\n");
/* devm allocation: freed automatically with @dev, callers must not kfree. */
5349 descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
/* _optional: a hole in the list yields a NULL desc rather than an error. */
5353 for (i = 0; i < ndescs; i++) {
5354 descs[i] = gpiod_get_index_optional(dev, "cs", i,
5356 if (IS_ERR(descs[i]))
5357 return PTR_ERR(descs[i]);
5360 *ncs_array = ndescs;
5365 EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
/*
 * Populate chip options and the ECC configuration from the device tree:
 * bus width, boot-medium flag, on-flash BBT, then the generic and legacy
 * ECC user configs. Finally resolve the effective ECC engine type with
 * precedence: user (DT) choice > controller driver choice > ON_HOST default.
 */
5367 static int rawnand_dt_init(struct nand_chip *chip)
5369 struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
5370 struct device_node *dn = nand_get_flash_node(chip);
5375 if (of_get_nand_bus_width(dn) == 16)
5376 chip->options |= NAND_BUSWIDTH_16;
5378 if (of_property_read_bool(dn, "nand-is-boot-medium"))
5379 chip->options |= NAND_IS_BOOT_MEDIUM;
5381 if (of_get_nand_on_flash_bbt(dn))
5382 chip->bbt_options |= NAND_BBT_USE_FLASH;
/* Modern bindings first, then fill remaining gaps from legacy ones. */
5384 of_get_nand_ecc_user_config(nand);
5385 of_get_nand_ecc_legacy_user_config(chip);
5388 * If neither the user nor the NAND controller have requested a specific
5389 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
5391 nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
5394 * Use the user requested engine type, unless there is none, in this
5395 * case default to the NAND controller choice, otherwise fallback to
5396 * the raw NAND default one.
5398 if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
5399 chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
5400 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5401 chip->ecc.engine_type = nand->ecc.defaults.engine_type;
/* Copy the remaining user-requested ECC parameters verbatim. */
5403 chip->ecc.placement = nand->ecc.user_conf.placement;
5404 chip->ecc.algo = nand->ecc.user_conf.algo;
5405 chip->ecc.strength = nand->ecc.user_conf.strength;
5406 chip->ecc.size = nand->ecc.user_conf.step_size;
5412 * nand_scan_ident - Scan for the NAND device
5413 * @chip: NAND chip object
5414 * @maxchips: number of chips to scan for
5415 * @table: alternative NAND ID table
5417 * This is the first phase of the normal nand_scan() function. It reads the
5418 * flash ID and sets up MTD fields accordingly.
5420 * This helper used to be called directly from controller drivers that needed
5421 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5422 * prevented dynamic allocations during this phase which was inconvenient and
5423 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5425 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5426 struct nand_flash_dev *table)
5428 struct mtd_info *mtd = nand_to_mtd(chip);
5429 struct nand_memory_organization *memorg;
5430 int nand_maf_id, nand_dev_id;
5434 memorg = nanddev_get_memorg(&chip->base);
5436 /* Assume all dies are deselected when we enter nand_scan_ident(). */
5439 mutex_init(&chip->lock);
5441 /* Enforce the right timings for reset/detection */
5442 chip->current_interface_config = nand_get_reset_interface_config();
5444 ret = rawnand_dt_init(chip);
/* Default MTD name: the parent device name, if the caller set none. */
5448 if (!mtd->name && mtd->dev.parent)
5449 mtd->name = dev_name(mtd->dev.parent);
5451 /* Set the default functions */
5452 nand_set_defaults(chip);
5454 ret = nand_legacy_check_hooks(chip);
5458 memorg->ntargets = maxchips;
5460 /* Read the flash type */
5461 ret = nand_detect(chip, table);
5463 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5464 pr_warn("No NAND device found\n")
5465 nand_deselect_target(chip);
/* Remember the first die's IDs to compare further dies against. */
5469 nand_maf_id = chip->id.data[0];
5470 nand_dev_id = chip->id.data[1];
5472 nand_deselect_target(chip);
5474 /* Check for a chip array */
5475 for (i = 1; i < maxchips; i++) {
5478 /* See comment in nand_get_flash_type for reset */
5479 ret = nand_reset(chip, i);
5483 nand_select_target(chip, i);
5484 /* Send the command for reading device ID */
5485 ret = nand_readid_op(chip, 0, id, sizeof(id));
5488 /* Read manufacturer and device IDs */
5489 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5490 nand_deselect_target(chip);
5493 nand_deselect_target(chip);
5496 pr_info("%d chips detected\n", i);
5498 /* Store the number of chips and calc total size for mtd */
5499 memorg->ntargets = i;
5500 mtd->size = i * nanddev_target_size(&chip->base);
/* Free the allocations made during the identification phase (model string
 * and ONFI parameter page copy). Counterpart of nand_scan_ident(). */
5505 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5507 kfree(chip->parameters.model);
5508 kfree(chip->parameters.onfi);
/*
 * Set up the generic software Hamming ECC engine for a raw NAND chip and
 * mirror the resolved configuration (step size, strength, totals) back into
 * the legacy chip->ecc fields used by the raw NAND core.
 */
5511 int rawnand_sw_hamming_init(struct nand_chip *chip)
5513 struct nand_ecc_sw_hamming_conf *engine_conf;
5514 struct nand_device *base = &chip->base;
5517 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5518 base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
5519 base->ecc.user_conf.strength = chip->ecc.strength;
5520 base->ecc.user_conf.step_size = chip->ecc.size;
5522 ret = nand_ecc_sw_hamming_init_ctx(base);
5526 engine_conf = base->ecc.ctx.priv;
/* Propagate the Smart Media byte ordering option into the engine. */
5528 if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
5529 engine_conf->sm_order = true;
/* Copy the engine's final conf back so legacy code sees real values. */
5531 chip->ecc.size = base->ecc.ctx.conf.step_size;
5532 chip->ecc.strength = base->ecc.ctx.conf.strength;
5533 chip->ecc.total = base->ecc.ctx.total;
5534 chip->ecc.steps = nanddev_get_ecc_nsteps(base);
5535 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
5539 EXPORT_SYMBOL(rawnand_sw_hamming_init);
/* Thin wrapper: compute the Hamming ECC code for @buf via the generic
 * software engine operating on the underlying nand_device. */
5541 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
5542 const unsigned char *buf,
5543 unsigned char *code)
5545 struct nand_device *base = &chip->base;
5547 return nand_ecc_sw_hamming_calculate(base, buf, code);
5549 EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
/* Thin wrapper: correct @buf using read vs. calculated Hamming ECC bytes;
 * return value comes straight from the generic engine. */
5551 int rawnand_sw_hamming_correct(struct nand_chip *chip,
5553 unsigned char *read_ecc,
5554 unsigned char *calc_ecc)
5556 struct nand_device *base = &chip->base;
5558 return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
5560 EXPORT_SYMBOL(rawnand_sw_hamming_correct);
/* Release the software Hamming engine context created by
 * rawnand_sw_hamming_init(). */
5562 void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
5564 struct nand_device *base = &chip->base;
5566 nand_ecc_sw_hamming_cleanup_ctx(base);
5568 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
/*
 * Set up the generic software BCH ECC engine for a raw NAND chip and copy
 * the resolved configuration back into the legacy chip->ecc fields.
 * Mirrors rawnand_sw_hamming_init() for the BCH algorithm.
 */
5570 int rawnand_sw_bch_init(struct nand_chip *chip)
5572 struct nand_device *base = &chip->base;
5573 const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
5576 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5577 base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
5578 base->ecc.user_conf.step_size = chip->ecc.size;
5579 base->ecc.user_conf.strength = chip->ecc.strength;
5581 ret = nand_ecc_sw_bch_init_ctx(base);
/* The init may adjust step size/strength; read back the final values. */
5585 chip->ecc.size = ecc_conf->step_size;
5586 chip->ecc.strength = ecc_conf->strength;
5587 chip->ecc.total = base->ecc.ctx.total;
5588 chip->ecc.steps = nanddev_get_ecc_nsteps(base);
5589 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
5593 EXPORT_SYMBOL(rawnand_sw_bch_init);
/* Thin wrapper: compute the BCH ECC code for @buf via the generic engine.
 * static (unlike the Hamming variant) — only used inside this file. */
5595 static int rawnand_sw_bch_calculate(struct nand_chip *chip,
5596 const unsigned char *buf,
5597 unsigned char *code)
5599 struct nand_device *base = &chip->base;
5601 return nand_ecc_sw_bch_calculate(base, buf, code);
/* Thin wrapper: correct @buf using read vs. calculated BCH ECC bytes. */
5604 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
5605 unsigned char *read_ecc, unsigned char *calc_ecc)
5607 struct nand_device *base = &chip->base;
5609 return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
5611 EXPORT_SYMBOL(rawnand_sw_bch_correct);
/* Release the software BCH engine context created by rawnand_sw_bch_init(). */
5613 void rawnand_sw_bch_cleanup(struct nand_chip *chip)
5615 struct nand_device *base = &chip->base;
5617 nand_ecc_sw_bch_cleanup_ctx(base);
5619 EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
/*
 * Fill in default page/OOB accessors for on-host (controller) ECC,
 * depending on where the ECC bytes are placed. Driver-provided hooks are
 * always preserved; defaults are only installed for hooks left NULL.
 */
5621 static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
5623 struct nand_ecc_ctrl *ecc = &chip->ecc;
5625 switch (ecc->placement) {
5626 case NAND_ECC_PLACEMENT_UNKNOWN:
5627 case NAND_ECC_PLACEMENT_OOB:
5628 /* Use standard hwecc read page function? */
5629 if (!ecc->read_page)
5630 ecc->read_page = nand_read_page_hwecc;
5631 if (!ecc->write_page)
5632 ecc->write_page = nand_write_page_hwecc;
5633 if (!ecc->read_page_raw)
5634 ecc->read_page_raw = nand_read_page_raw;
5635 if (!ecc->write_page_raw)
5636 ecc->write_page_raw = nand_write_page_raw;
5638 ecc->read_oob = nand_read_oob_std;
5639 if (!ecc->write_oob)
5640 ecc->write_oob = nand_write_oob_std;
5641 if (!ecc->read_subpage)
5642 ecc->read_subpage = nand_read_subpage;
/* Subpage writes only work when the controller exposes hwctl+calculate. */
5643 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5644 ecc->write_subpage = nand_write_subpage_hwecc;
5647 case NAND_ECC_PLACEMENT_INTERLEAVED:
/* Interleaved (syndrome) placement requires driver ECC primitives unless
 * the driver also replaced the page accessors entirely. */
5648 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5650 ecc->read_page == nand_read_page_hwecc ||
5652 ecc->write_page == nand_write_page_hwecc)) {
5653 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5656 /* Use standard syndrome read/write page function? */
5657 if (!ecc->read_page)
5658 ecc->read_page = nand_read_page_syndrome;
5659 if (!ecc->write_page)
5660 ecc->write_page = nand_write_page_syndrome;
5661 if (!ecc->read_page_raw)
5662 ecc->read_page_raw = nand_read_page_raw_syndrome;
5663 if (!ecc->write_page_raw)
5664 ecc->write_page_raw = nand_write_page_raw_syndrome;
5666 ecc->read_oob = nand_read_oob_syndrome;
5667 if (!ecc->write_oob)
5668 ecc->write_oob = nand_write_oob_syndrome;
5672 pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
/*
 * Install the software-ECC page accessors and initialize the selected
 * software engine (Hamming or BCH). Must only be called when the engine
 * type has already been resolved to NAND_ECC_ENGINE_TYPE_SOFT.
 */
5680 static int nand_set_ecc_soft_ops(struct nand_chip *chip)
5682 struct mtd_info *mtd = nand_to_mtd(chip);
5683 struct nand_device *nanddev = mtd_to_nanddev(mtd);
5684 struct nand_ecc_ctrl *ecc = &chip->ecc;
5687 if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
5690 switch (ecc->algo) {
5691 case NAND_ECC_ALGO_HAMMING:
5692 ecc->calculate = rawnand_sw_hamming_calculate;
5693 ecc->correct = rawnand_sw_hamming_correct;
5694 ecc->read_page = nand_read_page_swecc;
5695 ecc->read_subpage = nand_read_subpage;
5696 ecc->write_page = nand_write_page_swecc;
5697 if (!ecc->read_page_raw)
5698 ecc->read_page_raw = nand_read_page_raw;
5699 if (!ecc->write_page_raw)
5700 ecc->write_page_raw = nand_write_page_raw;
5701 ecc->read_oob = nand_read_oob_std;
5702 ecc->write_oob = nand_write_oob_std;
/* Legacy Smart Media ECC byte order, selected at build time. */
5708 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
5709 ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
5711 ret = rawnand_sw_hamming_init(chip);
5713 WARN(1, "Hamming ECC initialization failed!\n");
5718 case NAND_ECC_ALGO_BCH:
5719 if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
5720 WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
5723 ecc->calculate = rawnand_sw_bch_calculate;
5724 ecc->correct = rawnand_sw_bch_correct;
5725 ecc->read_page = nand_read_page_swecc;
5726 ecc->read_subpage = nand_read_subpage;
5727 ecc->write_page = nand_write_page_swecc;
5728 if (!ecc->read_page_raw)
5729 ecc->read_page_raw = nand_read_page_raw;
5730 if (!ecc->write_page_raw)
5731 ecc->write_page_raw = nand_write_page_raw;
5732 ecc->read_oob = nand_read_oob_std;
5733 ecc->write_oob = nand_write_oob_std;
5736 * We can only maximize ECC config when the default layout is
5737 * used, otherwise we don't know how many bytes can really be
5740 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
5741 mtd->ooblayout != nand_get_large_page_ooblayout())
5742 nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;
5744 ret = rawnand_sw_bch_init(chip);
5746 WARN(1, "BCH ECC initialization failed!\n");
5752 WARN(1, "Unsupported ECC algorithm!\n");
5758 * nand_check_ecc_caps - check the sanity of preset ECC settings
5759 * @chip: nand chip info structure
5760 * @caps: ECC caps info structure
5761 * @oobavail: OOB size that the ECC engine can use
5763 * When ECC step size and strength are already set, check if they are supported
5764 * by the controller and the calculated ECC bytes fit within the chip's OOB.
5765 * On success, the calculated ECC bytes is set.
5768 nand_check_ecc_caps(struct nand_chip *chip,
5769 const struct nand_ecc_caps *caps, int oobavail)
5771 struct mtd_info *mtd = nand_to_mtd(chip);
5772 const struct nand_ecc_step_info *stepinfo;
5773 int preset_step = chip->ecc.size;
5774 int preset_strength = chip->ecc.strength;
5775 int ecc_bytes, nsteps = mtd->writesize / preset_step;
/* Look for an exact (step, strength) match among the controller caps. */
5778 for (i = 0; i < caps->nstepinfos; i++) {
5779 stepinfo = &caps->stepinfos[i];
5781 if (stepinfo->stepsize != preset_step)
5784 for (j = 0; j < stepinfo->nstrengths; j++) {
5785 if (stepinfo->strengths[j] != preset_strength)
5788 ecc_bytes = caps->calc_ecc_bytes(preset_step,
5790 if (WARN_ON_ONCE(ecc_bytes < 0))
/* Match found, but the total ECC payload must still fit in OOB. */
5793 if (ecc_bytes * nsteps > oobavail) {
5794 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5795 preset_step, preset_strength);
5799 chip->ecc.bytes = ecc_bytes;
5805 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5806 preset_step, preset_strength);
5812 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5813 * @chip: nand chip info structure
5814 * @caps: ECC engine caps info structure
5815 * @oobavail: OOB size that the ECC engine can use
5817 * If a chip's ECC requirement is provided, try to meet it with the least
5818 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5819 * On success, the chosen ECC settings are set.
5822 nand_match_ecc_req(struct nand_chip *chip,
5823 const struct nand_ecc_caps *caps, int oobavail)
5825 const struct nand_ecc_props *requirements =
5826 nanddev_get_ecc_requirements(&chip->base);
5827 struct mtd_info *mtd = nand_to_mtd(chip);
5828 const struct nand_ecc_step_info *stepinfo;
5829 int req_step = requirements->step_size;
5830 int req_strength = requirements->strength;
5831 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5832 int best_step, best_strength, best_ecc_bytes;
/* INT_MAX doubles as the "nothing found yet" sentinel below. */
5833 int best_ecc_bytes_total = INT_MAX;
5836 /* No information provided by the NAND chip */
5837 if (!req_step || !req_strength)
5840 /* number of correctable bits the chip requires in a page */
5841 req_corr = mtd->writesize / req_step * req_strength;
5843 for (i = 0; i < caps->nstepinfos; i++) {
5844 stepinfo = &caps->stepinfos[i];
5845 step_size = stepinfo->stepsize;
5847 for (j = 0; j < stepinfo->nstrengths; j++) {
5848 strength = stepinfo->strengths[j];
5851 * If both step size and strength are smaller than the
5852 * chip's requirement, it is not easy to compare the
5853 * resulted reliability.
5855 if (step_size < req_step && strength < req_strength)
/* Step size must divide the page evenly. */
5858 if (mtd->writesize % step_size)
5861 nsteps = mtd->writesize / step_size;
5863 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5864 if (WARN_ON_ONCE(ecc_bytes < 0))
5866 ecc_bytes_total = ecc_bytes * nsteps;
/* Reject configs that overflow OOB or under-correct per page. */
5868 if (ecc_bytes_total > oobavail ||
5869 strength * nsteps < req_corr)
5873 * We assume the best is to meet the chip's requirement
5874 * with the least number of ECC bytes.
5876 if (ecc_bytes_total < best_ecc_bytes_total) {
5877 best_ecc_bytes_total = ecc_bytes_total;
5878 best_step = step_size;
5879 best_strength = strength;
5880 best_ecc_bytes = ecc_bytes;
/* Sentinel untouched means no acceptable config was found. */
5885 if (best_ecc_bytes_total == INT_MAX)
5888 chip->ecc.size = best_step;
5889 chip->ecc.strength = best_strength;
5890 chip->ecc.bytes = best_ecc_bytes;
5896 * nand_maximize_ecc - choose the max ECC strength available
5897 * @chip: nand chip info structure
5898 * @caps: ECC engine caps info structure
5899 * @oobavail: OOB size that the ECC engine can use
5901 * Choose the max ECC strength that is supported on the controller, and can fit
5902 * within the chip's OOB. On success, the chosen ECC settings are set.
5905 nand_maximize_ecc(struct nand_chip *chip,
5906 const struct nand_ecc_caps *caps, int oobavail)
5908 struct mtd_info *mtd = nand_to_mtd(chip);
5909 const struct nand_ecc_step_info *stepinfo;
5910 int step_size, strength, nsteps, ecc_bytes, corr;
5913 int best_strength, best_ecc_bytes;
5916 for (i = 0; i < caps->nstepinfos; i++) {
5917 stepinfo = &caps->stepinfos[i];
5918 step_size = stepinfo->stepsize;
5920 /* If chip->ecc.size is already set, respect it */
5921 if (chip->ecc.size && step_size != chip->ecc.size)
5924 for (j = 0; j < stepinfo->nstrengths; j++) {
5925 strength = stepinfo->strengths[j];
/* Step size must divide the page evenly. */
5927 if (mtd->writesize % step_size)
5930 nsteps = mtd->writesize / step_size;
5932 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5933 if (WARN_ON_ONCE(ecc_bytes < 0))
/* Skip configs whose ECC payload does not fit in OOB. */
5936 if (ecc_bytes * nsteps > oobavail)
/* Rank by total correctable bits per page. */
5939 corr = strength * nsteps;
5942 * If the number of correctable bits is the same,
5943 * bigger step_size has more reliability.
5945 if (corr > best_corr ||
5946 (corr == best_corr && step_size > best_step)) {
5948 best_step = step_size;
5949 best_strength = strength;
5950 best_ecc_bytes = ecc_bytes;
5958 chip->ecc.size = best_step;
5959 chip->ecc.strength = best_strength;
5960 chip->ecc.bytes = best_ecc_bytes;
5966 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5967 * @chip: nand chip info structure
5968 * @caps: ECC engine caps info structure
5969 * @oobavail: OOB size that the ECC engine can use
5971 * Choose the ECC configuration according to following logic.
5973 * 1. If both ECC step size and ECC strength are already set (usually by DT)
5974 * then check if it is supported by this controller.
5975 * 2. If the user provided the nand-ecc-maximize property, then select maximum
5977 * 3. Otherwise, try to match the ECC step size and ECC strength closest
5978 * to the chip's requirement. If available OOB size can't fit the chip
5979 * requirement then fallback to the maximum ECC step size and ECC strength.
5981 * On success, the chosen ECC settings are set.
5983 int nand_ecc_choose_conf(struct nand_chip *chip,
5984 const struct nand_ecc_caps *caps, int oobavail)
5986 struct mtd_info *mtd = nand_to_mtd(chip);
5987 struct nand_device *nanddev = mtd_to_nanddev(mtd);
/* Guard against a nonsensical OOB budget before any selection. */
5989 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5992 if (chip->ecc.size && chip->ecc.strength)
5993 return nand_check_ecc_caps(chip, caps, oobavail);
5995 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5996 return nand_maximize_ecc(chip, caps, oobavail);
5998 if (!nand_match_ecc_req(chip, caps, oobavail))
/* Requirement could not be met: fall back to the strongest config. */
6001 return nand_maximize_ecc(chip, caps, oobavail);
6003 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
/*
 * nand_ops ->erase hook: convert the generic NAND position into a raw NAND
 * eraseblock row address, select the die, issue the erase, deselect.
 */
6005 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
6007 struct nand_chip *chip = container_of(nand, struct nand_chip,
6009 unsigned int eb = nanddev_pos_to_row(nand, pos);
/* Row address encodes pages; shift down to an eraseblock address. */
6012 eb >>= nand->rowconv.eraseblock_addr_shift;
6014 nand_select_target(chip, pos->target);
6015 ret = nand_erase_op(chip, eb);
6016 nand_deselect_target(chip);
/* nand_ops ->markbad hook: write a bad block marker at the given position. */
6021 static int rawnand_markbad(struct nand_device *nand,
6022 const struct nand_pos *pos)
6024 struct nand_chip *chip = container_of(nand, struct nand_chip,
6027 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
/* nand_ops ->isbad hook: check the bad block marker for the given position,
 * with the target die selected around the access. */
6030 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
6032 struct nand_chip *chip = container_of(nand, struct nand_chip,
6036 nand_select_target(chip, pos->target);
6037 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6038 nand_deselect_target(chip);
/* Generic NAND framework ops vector backing the raw NAND layer. */
6043 static const struct nand_ops rawnand_ops = {
6044 .erase = rawnand_erase,
6045 .markbad = rawnand_markbad,
6046 .isbad = rawnand_isbad,
6050 * nand_scan_tail - Scan for the NAND device
6051 * @chip: NAND chip object
6053 * This is the second phase of the normal nand_scan() function. It fills out
6054 * all the uninitialized function pointers with the defaults and scans for a
6055 * bad block table if appropriate.
6057 static int nand_scan_tail(struct nand_chip *chip)
6059 struct mtd_info *mtd = nand_to_mtd(chip);
6060 struct nand_ecc_ctrl *ecc = &chip->ecc;
6063 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
6064 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
6065 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
/* Single buffer covering one full page plus its OOB area. */
6069 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
6070 if (!chip->data_buf)
6074 * FIXME: some NAND manufacturer drivers expect the first die to be
6075 * selected when manufacturer->init() is called. They should be fixed
6076 * to explicitly select the relevant die when interacting with the NAND
6079 nand_select_target(chip, 0);
6080 ret = nand_manufacturer_init(chip);
6081 nand_deselect_target(chip);
6085 /* Set the internal oob buffer location, just after the page data */
6086 chip->oob_poi = chip->data_buf + mtd->writesize;
6089 * If no default placement scheme is given, select an appropriate one.
6091 if (!mtd->ooblayout &&
6092 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
6093 ecc->algo == NAND_ECC_ALGO_BCH) &&
6094 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
6095 ecc->algo == NAND_ECC_ALGO_HAMMING)) {
6096 switch (mtd->oobsize) {
6099 mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
6103 mtd_set_ooblayout(mtd,
6104 nand_get_large_page_hamming_ooblayout());
6108 * Expose the whole OOB area to users if ECC_NONE
6109 * is passed. We could do that for all kind of
6110 * ->oobsize, but we must keep the old large/small
6111 * page with ECC layout when ->oobsize <= 128 for
6112 * compatibility reasons.
6114 if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
6115 mtd_set_ooblayout(mtd,
6116 nand_get_large_page_ooblayout());
6120 WARN(1, "No oob scheme defined for oobsize %d\n",
6123 goto err_nand_manuf_cleanup;
6128 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
6129 * selected and we have 256 byte pagesize fallback to software ECC
6132 switch (ecc->engine_type) {
6133 case NAND_ECC_ENGINE_TYPE_ON_HOST:
6134 ret = nand_set_ecc_on_host_ops(chip);
6136 goto err_nand_manuf_cleanup;
6138 if (mtd->writesize >= ecc->size) {
6139 if (!ecc->strength) {
6140 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
6142 goto err_nand_manuf_cleanup;
/* Page smaller than the ECC step: fall back to soft Hamming. */
6146 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
6147 ecc->size, mtd->writesize);
6148 ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
6149 ecc->algo = NAND_ECC_ALGO_HAMMING;
6152 case NAND_ECC_ENGINE_TYPE_SOFT:
6153 ret = nand_set_ecc_soft_ops(chip);
6155 goto err_nand_manuf_cleanup;
6158 case NAND_ECC_ENGINE_TYPE_ON_DIE:
6159 if (!ecc->read_page || !ecc->write_page) {
6160 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
6162 goto err_nand_manuf_cleanup;
6165 ecc->read_oob = nand_read_oob_std;
6166 if (!ecc->write_oob)
6167 ecc->write_oob = nand_write_oob_std;
6170 case NAND_ECC_ENGINE_TYPE_NONE:
6171 pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
6172 ecc->read_page = nand_read_page_raw;
6173 ecc->write_page = nand_write_page_raw;
6174 ecc->read_oob = nand_read_oob_std;
6175 ecc->read_page_raw = nand_read_page_raw;
6176 ecc->write_page_raw = nand_write_page_raw;
6177 ecc->write_oob = nand_write_oob_std;
6178 ecc->size = mtd->writesize;
6184 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
6186 goto err_nand_manuf_cleanup;
/* ECC engines with calc/correct hooks need scratch buffers sized to OOB. */
6189 if (ecc->correct || ecc->calculate) {
6190 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6191 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6192 if (!ecc->calc_buf || !ecc->code_buf) {
6194 goto err_nand_manuf_cleanup;
6198 /* For many systems, the standard OOB write also works for raw */
6199 if (!ecc->read_oob_raw)
6200 ecc->read_oob_raw = ecc->read_oob;
6201 if (!ecc->write_oob_raw)
6202 ecc->write_oob_raw = ecc->write_oob;
6204 /* propagate ecc info to mtd_info */
6205 mtd->ecc_strength = ecc->strength;
6206 mtd->ecc_step_size = ecc->size;
6209 * Set the number of read / write steps for one page depending on ECC
6213 ecc->steps = mtd->writesize / ecc->size;
6214 if (ecc->steps * ecc->size != mtd->writesize) {
6215 WARN(1, "Invalid ECC parameters\n");
6217 goto err_nand_manuf_cleanup;
6221 ecc->total = ecc->steps * ecc->bytes;
6222 chip->base.ecc.ctx.total = ecc->total;
6225 if (ecc->total > mtd->oobsize) {
6226 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
6228 goto err_nand_manuf_cleanup;
6232 * The number of bytes available for a client to place data into
6233 * the out of band area.
6235 ret = mtd_ooblayout_count_freebytes(mtd);
6239 mtd->oobavail = ret;
6241 /* ECC sanity check: warn if it's too weak */
6242 if (!nand_ecc_is_strong_enough(&chip->base))
6243 pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
6244 mtd->name, chip->ecc.strength, chip->ecc.size,
6245 nanddev_get_ecc_requirements(&chip->base)->strength,
6246 nanddev_get_ecc_requirements(&chip->base)->step_size);
6248 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
6249 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
6250 switch (ecc->steps) {
6252 mtd->subpage_sft = 1;
6257 mtd->subpage_sft = 2;
6261 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
6263 /* Invalidate the pagebuffer reference */
6264 chip->pagecache.page = -1;
6266 /* Large page NAND with SOFT_ECC should support subpage reads */
6267 switch (ecc->engine_type) {
6268 case NAND_ECC_ENGINE_TYPE_SOFT:
6269 if (chip->page_shift > 9)
6270 chip->options |= NAND_SUBPAGE_READ;
6277 ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
6279 goto err_nand_manuf_cleanup;
6281 /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
6282 if (chip->options & NAND_ROM)
6283 mtd->flags = MTD_CAP_ROM;
6285 /* Fill in remaining MTD driver data */
6286 mtd->_erase = nand_erase;
6288 mtd->_unpoint = NULL;
6289 mtd->_panic_write = panic_nand_write;
6290 mtd->_read_oob = nand_read_oob;
6291 mtd->_write_oob = nand_write_oob;
6292 mtd->_sync = nand_sync;
6293 mtd->_lock = nand_lock;
6294 mtd->_unlock = nand_unlock;
6295 mtd->_suspend = nand_suspend;
6296 mtd->_resume = nand_resume;
6297 mtd->_reboot = nand_shutdown;
6298 mtd->_block_isreserved = nand_block_isreserved;
6299 mtd->_block_isbad = nand_block_isbad;
6300 mtd->_block_markbad = nand_block_markbad;
6301 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
6304 * Initialize bitflip_threshold to its default prior scan_bbt() call.
6305 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
6308 if (!mtd->bitflip_threshold)
6309 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
6311 /* Find the fastest data interface for this chip */
6312 ret = nand_choose_interface_config(chip);
6314 goto err_nanddev_cleanup;
6316 /* Enter fastest possible mode on all dies. */
6317 for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
6318 ret = nand_setup_interface(chip, i);
6320 goto err_free_interface_config;
6324 * Look for secure regions in the NAND chip. These regions are supposed
6325 * to be protected by a secure element like Trustzone. So the read/write
6326 * accesses to these regions will be blocked in the runtime by this
6329 ret = of_get_nand_secure_regions(chip);
6331 goto err_free_interface_config;
6333 /* Check, if we should skip the bad block table scan */
6334 if (chip->options & NAND_SKIP_BBTSCAN)
6337 /* Build bad block table */
6338 ret = nand_create_bbt(chip);
6340 goto err_free_secure_regions;
/* Error unwind: release resources in reverse order of acquisition. */
6344 err_free_secure_regions:
6345 kfree(chip->secure_regions);
6347 err_free_interface_config:
6348 kfree(chip->best_interface_config);
6350 err_nanddev_cleanup:
6351 nanddev_cleanup(&chip->base);
6353 err_nand_manuf_cleanup:
6354 nand_manufacturer_cleanup(chip);
6357 kfree(chip->data_buf);
6358 kfree(ecc->code_buf);
6359 kfree(ecc->calc_buf);
/* Invoke the controller's optional ->attach_chip() hook, if provided. */
6364 static int nand_attach(struct nand_chip *chip)
6366 if (chip->controller->ops && chip->controller->ops->attach_chip)
6367 return chip->controller->ops->attach_chip(chip);
/* Invoke the controller's optional ->detach_chip() hook, if provided. */
6372 static void nand_detach(struct nand_chip *chip)
6374 if (chip->controller->ops && chip->controller->ops->detach_chip)
6375 chip->controller->ops->detach_chip(chip);
6379 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
6380 * @chip: NAND chip object
6381 * @maxchips: number of chips to scan for.
6382 * @ids: optional flash IDs table
6384 * This fills out all the uninitialized function pointers with the defaults.
6385 * The flash ID is read and the mtd/chip structures are filled with the
6386 * appropriate values.
/* Runs the three scan phases in order: identification, controller attach,
 * then the heavy tail initialization. */
6388 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
6389 struct nand_flash_dev *ids)
6396 ret = nand_scan_ident(chip, maxchips, ids);
6400 ret = nand_attach(chip);
6404 ret = nand_scan_tail(chip);
/* Error path: undo the identification-phase allocations. */
6413 nand_scan_ident_cleanup(chip);
6417 EXPORT_SYMBOL(nand_scan_with_ids);
6420 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
6421 * @chip: NAND chip object
6423 void nand_cleanup(struct nand_chip *chip)
/* Tear down the software ECC engine matching the algorithm in use. */
6425 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
6426 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
6427 rawnand_sw_hamming_cleanup(chip);
6428 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
6429 rawnand_sw_bch_cleanup(chip);
6432 nanddev_cleanup(&chip->base);
6434 /* Free secure regions data */
6435 kfree(chip->secure_regions);
6437 /* Free bad block table memory */
6439 kfree(chip->data_buf);
6440 kfree(chip->ecc.code_buf);
6441 kfree(chip->ecc.calc_buf);
6443 /* Free bad block descriptor memory */
6444 if (chip->badblock_pattern && chip->badblock_pattern->options
6445 & NAND_BBT_DYNAMICSTRUCT)
6446 kfree(chip->badblock_pattern);
6448 /* Free the data interface */
6449 kfree(chip->best_interface_config);
6451 /* Free manufacturer priv data. */
6452 nand_manufacturer_cleanup(chip);
6454 /* Free controller specific allocations after chip identification */
6457 /* Free identification phase allocations */
6458 nand_scan_ident_cleanup(chip);
/* Standard kernel module metadata. */
6463 MODULE_LICENSE("GPL");
6464 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6465 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6466 MODULE_DESCRIPTION("Generic NAND flash driver code");