Merge tag 'nand/for-5.18' into mtd/next
[linux-2.6-microblaze.git] / drivers / mtd / nand / raw / nand_base.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Overview:
4  *   This is the generic MTD driver for NAND flash devices. It should be
5  *   capable of working with almost all NAND chips currently available.
6  *
7  *      Additional technical information is available on
8  *      http://www.linux-mtd.infradead.org/doc/nand.html
9  *
10  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
11  *                2002-2006 Thomas Gleixner (tglx@linutronix.de)
12  *
13  *  Credits:
14  *      David Woodhouse for adding multichip support
15  *
16  *      Aleph One Ltd. and Toby Churchill Ltd. for supporting the
17  *      rework for 2K page size chips
18  *
19  *  TODO:
20  *      Enable cached programming for 2k page size chips
21  *      Check, if mtd->ecctype should be set to MTD_ECC_HW
22  *      if we have HW ECC support.
23  *      BBT table is not serialized, has to be fixed
24  */
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/err.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/mm.h>
35 #include <linux/types.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/nand-ecc-sw-hamming.h>
39 #include <linux/mtd/nand-ecc-sw-bch.h>
40 #include <linux/interrupt.h>
41 #include <linux/bitops.h>
42 #include <linux/io.h>
43 #include <linux/mtd/partitions.h>
44 #include <linux/of.h>
45 #include <linux/of_gpio.h>
46 #include <linux/gpio/consumer.h>
47
48 #include "internals.h"
49
50 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
51                                        struct mtd_pairing_info *info)
52 {
53         int lastpage = (mtd->erasesize / mtd->writesize) - 1;
54         int dist = 3;
55
56         if (page == lastpage)
57                 dist = 2;
58
59         if (!page || (page & 1)) {
60                 info->group = 0;
61                 info->pair = (page + 1) / 2;
62         } else {
63                 info->group = 1;
64                 info->pair = (page + 1 - dist) / 2;
65         }
66
67         return 0;
68 }
69
/*
 * Distance-3 pairing: inverse of nand_pairing_dist3_get_info(). Map a
 * (group, pair) couple back to a page offset within the erase block, or
 * return -EINVAL when the couple does not map to a valid page.
 */
static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct mtd_pairing_info *info)
{
	/* Index of the last pair in an erase block */
	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	/* (group 0, pair 0) always maps to page 0 */
	if (!info->group && !info->pair)
		return 0;

	/* The last pair uses a pairing distance of 2 instead of 3 */
	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	/* Reject couples that fall past the end of the block */
	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}
93
/*
 * Generic "distance 3" page pairing scheme, for NAND chips whose MLC page
 * pairing follows this layout (two groups per pair).
 */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
99
100 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
101 {
102         int ret = 0;
103
104         /* Start address must align on block boundary */
105         if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
106                 pr_debug("%s: unaligned address\n", __func__);
107                 ret = -EINVAL;
108         }
109
110         /* Length must align on block boundary */
111         if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
112                 pr_debug("%s: length not block aligned\n", __func__);
113                 ret = -EINVAL;
114         }
115
116         return ret;
117 }
118
119 /**
120  * nand_extract_bits - Copy unaligned bits from one buffer to another one
121  * @dst: destination buffer
122  * @dst_off: bit offset at which the writing starts
123  * @src: source buffer
124  * @src_off: bit offset at which the reading starts
125  * @nbits: number of bits to copy from @src to @dst
126  *
127  * Copy bits from one memory region to another (overlap authorized).
128  */
129 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
130                        unsigned int src_off, unsigned int nbits)
131 {
132         unsigned int tmp, n;
133
134         dst += dst_off / 8;
135         dst_off %= 8;
136         src += src_off / 8;
137         src_off %= 8;
138
139         while (nbits) {
140                 n = min3(8 - dst_off, 8 - src_off, nbits);
141
142                 tmp = (*src >> src_off) & GENMASK(n - 1, 0);
143                 *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
144                 *dst |= tmp << dst_off;
145
146                 dst_off += n;
147                 if (dst_off >= 8) {
148                         dst++;
149                         dst_off -= 8;
150                 }
151
152                 src_off += n;
153                 if (src_off >= 8) {
154                         src++;
155                         src_off -= 8;
156                 }
157
158                 nbits -= n;
159         }
160 }
161 EXPORT_SYMBOL_GPL(nand_extract_bits);
162
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *      PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 * NOTE(review): the check uses '>' rather than '>=', so
	 * cs == nanddev_ntargets() passes through — confirm this is intended.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	/* Legacy controllers are notified through their ->select_chip() hook */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
187
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	/* Notify the legacy controller (cs = -1 means "none selected") */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* Record that no die is selected only after the hook has run */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
203
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/*
	 * Release the controller and the chip, in the reverse order of the
	 * acquisition done by nand_get_device().
	 */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
216
/**
 * nand_bbm_get_next_page - Get the next page for bad block markers
 * @chip: NAND chip object
 * @page: First page to start checking for bad block marker usage
 *
 * Returns an integer that corresponds to the page offset within a block, for
 * a page that is used to store bad block markers. If no more pages are
 * available, -EINVAL is returned.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* Offset of the last page of an erase block */
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	/* When no BBM page flag is set, default to the first page only */
	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	/* The following checks are ordered: first, second, then last page */
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}
245
246 /**
247  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
248  * @chip: NAND chip object
249  * @ofs: offset from device start
250  *
251  * Check, if the block is bad.
252  */
253 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
254 {
255         int first_page, page_offset;
256         int res;
257         u8 bad;
258
259         first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
260         page_offset = nand_bbm_get_next_page(chip, 0);
261
262         while (page_offset >= 0) {
263                 res = chip->ecc.read_oob(chip, first_page + page_offset);
264                 if (res < 0)
265                         return res;
266
267                 bad = chip->oob_poi[chip->badblockpos];
268
269                 if (likely(chip->badblockbits == 8))
270                         res = bad != 0xFF;
271                 else
272                         res = hweight8(bad) < chip->badblockbits;
273                 if (res)
274                         return res;
275
276                 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
277         }
278
279         return 0;
280 }
281
282 /**
283  * nand_region_is_secured() - Check if the region is secured
284  * @chip: NAND chip object
285  * @offset: Offset of the region to check
286  * @size: Size of the region to check
287  *
288  * Checks if the region is secured by comparing the offset and size with the
289  * list of secure regions obtained from DT. Returns true if the region is
290  * secured else false.
291  */
292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
293 {
294         int i;
295
296         /* Skip touching the secure regions if present */
297         for (i = 0; i < chip->nr_secure_regions; i++) {
298                 const struct nand_secure_region *region = &chip->secure_regions[i];
299
300                 if (offset + size <= region->offset ||
301                     offset >= region->offset + region->size)
302                         continue;
303
304                 pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
305                          __func__, offset, offset + size);
306
307                 return true;
308         }
309
310         return false;
311 }
312
/*
 * Check whether the erase block at @ofs is bad according to its on-flash
 * bad block marker. Returns 0 (good), non-zero (bad) or a negative errno.
 */
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Some chips have no usable BBM: report every block as good */
	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, ofs, mtd->erasesize))
		return -EIO;

	/* In expert analysis mode, pretend all blocks are good */
	if (mtd_check_expert_analysis_mode())
		return 0;

	/* Let the controller driver override the generic marker check */
	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}
332
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access.
 *
 * This function does not fail: if the chip is suspended it blocks until the
 * chip is resumed, then takes both the chip and the controller locks.
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			/* Chip lock held: now grab the controller lock too */
			mutex_lock(&chip->controller->lock);
			return;
		}
		/* Drop the lock so the resume path can clear ->suspended */
		mutex_unlock(&chip->lock);

		wait_event(chip->resume_wq, !chip->suspended);
	}
}
355
/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @chip: NAND chip object
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 *
 * Returns 0 if writable, 1 if write protected, or a negative error code if
 * the status read fails.
 */
static int nand_check_wp(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* WP bit set in the status register means NOT protected */
	return status & NAND_STATUS_WP ? 0 : 1;
}
379
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Returns a pointer just past the consumed bytes of @oob. BUG()s on an
 * unknown ops->mode, so NULL is never actually returned.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw placement: copy directly at the requested offset */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Scatter the bytes into the free OOB areas of the layout */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}
417
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band. Returns 0 on success or a negative error code
 * (-EINVAL on a write past the end of the page, -EIO for a secured region,
 * -EROFS if write protected, or the low-level write status).
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Number of OOB bytes available for this access mode */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	/* Prepare chip->oob_poi from the client buffer */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
491
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 *
 * The zero marker is written to every page designated for bad block markers.
 * The first error encountered (if any) is returned after all pages have been
 * attempted.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: align the offset and write both marker bytes */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error but keep writing remaining markers */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
534
535 /**
536  * nand_markbad_bbm - mark a block by updating the BBM
537  * @chip: NAND chip object
538  * @ofs: offset of the block to mark bad
539  */
540 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
541 {
542         if (chip->legacy.block_markbad)
543                 return chip->legacy.block_markbad(chip, ofs);
544
545         return nand_default_block_markbad(chip, ofs);
546 }
547
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase failure is deliberately ignored: marking proceeds */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Preserve the first error from the OOB marking step */
		if (!ret)
			ret = res;
	}

	/* Only count the block when it was successfully marked */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
600
601 /**
602  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
603  * @mtd: MTD device structure
604  * @ofs: offset from device start
605  *
606  * Check if the block is marked as reserved.
607  */
608 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
609 {
610         struct nand_chip *chip = mtd_to_nand(mtd);
611
612         if (!chip->bbt)
613                 return 0;
614         /* Return info from the table */
615         return nand_isreserved_bbt(chip, ofs);
616 }
617
618 /**
619  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
620  * @chip: NAND chip object
621  * @ofs: offset from device start
622  * @allowbbt: 1, if its allowed to access the bbt area
623  *
624  * Check, if the block is bad. Either by reading the bad block table or
625  * calling of the scan function.
626  */
627 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
628 {
629         /* Return info from the table */
630         if (chip->bbt)
631                 return nand_isbad_bbt(chip, ofs, allowbbt);
632
633         return nand_isbad_bbm(chip, ofs);
634 }
635
636 /**
637  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
638  * @chip: NAND chip structure
639  * @timeout_ms: Timeout in ms
640  *
641  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
642  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
643  * returned.
644  *
645  * This helper is intended to be used when the controller does not have access
646  * to the NAND R/B pin.
647  *
648  * Be aware that calling this helper from an ->exec_op() implementation means
649  * ->exec_op() must be re-entrant.
650  *
651  * Return 0 if the NAND chip is ready, a negative error otherwise.
652  */
653 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
654 {
655         const struct nand_interface_config *conf;
656         u8 status = 0;
657         int ret;
658
659         if (!nand_has_exec_op(chip))
660                 return -ENOTSUPP;
661
662         /* Wait tWB before polling the STATUS reg. */
663         conf = nand_get_interface_config(chip);
664         ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
665
666         ret = nand_status_op(chip, NULL);
667         if (ret)
668                 return ret;
669
670         /*
671          * +1 below is necessary because if we are now in the last fraction
672          * of jiffy and msecs_to_jiffies is 1 then we will wait only that
673          * small jiffy fraction - possibly leading to false timeout
674          */
675         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
676         do {
677                 ret = nand_read_data_op(chip, &status, sizeof(status), true,
678                                         false);
679                 if (ret)
680                         break;
681
682                 if (status & NAND_STATUS_READY)
683                         break;
684
685                 /*
686                  * Typical lowest execution time for a tR on most NANDs is 10us,
687                  * use this as polling delay before doing something smarter (ie.
688                  * deriving a delay from the timeout value, timeout_ms/ratio).
689                  */
690                 udelay(10);
691         } while (time_before(jiffies, timeout_ms));
692
693         /*
694          * We have to exit READ_STATUS mode in order to read real data on the
695          * bus in case the WAITRDY instruction is preceding a DATA_IN
696          * instruction.
697          */
698         nand_exit_status_op(chip);
699
700         if (ret)
701                 return ret;
702
703         return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
704 };
705 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
706
707 /**
708  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
709  * @chip: NAND chip structure
710  * @gpiod: GPIO descriptor of R/B pin
711  * @timeout_ms: Timeout in ms
712  *
713  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
714  * whitin the specified timeout, -ETIMEDOUT is returned.
715  *
716  * This helper is intended to be used when the controller has access to the
717  * NAND R/B pin over GPIO.
718  *
719  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
720  */
721 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
722                       unsigned long timeout_ms)
723 {
724
725         /*
726          * Wait until R/B pin indicates chip is ready or timeout occurs.
727          * +1 below is necessary because if we are now in the last fraction
728          * of jiffy and msecs_to_jiffies is 1 then we will wait only that
729          * small jiffy fraction - possibly leading to false timeout.
730          */
731         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
732         do {
733                 if (gpiod_get_value_cansleep(gpiod))
734                         return 0;
735
736                 cond_resched();
737         } while (time_before(jiffies, timeout_ms));
738
739         return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
740 };
741 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
742
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 *
 * Polls in 1 ms steps for at most @timeo iterations; gives up silently on a
 * read error or when the timeout expires (best-effort in panic context).
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			/* Prefer the driver-provided ready callback */
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* Fall back to reading the status register */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
774
775 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
776 {
777         return (chip->parameters.supports_set_get_features &&
778                 test_bit(addr, chip->parameters.get_feature_list));
779 }
780
781 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
782 {
783         return (chip->parameters.supports_set_get_features &&
784                 test_bit(addr, chip->parameters.set_feature_list));
785 }
786
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	/* Nothing to do when the controller has fixed timings */
	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
826
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise. On failure of the
 * chip-side acknowledgment the interface is reset back to mode 0.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * A nand_reset_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	/* Encode the requested mode and interface type in one byte */
	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	/* The chip must echo back exactly the requested mode byte */
	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
917
918 /**
919  * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
920  *                                NAND controller and the NAND chip support
921  * @chip: the NAND chip
922  * @iface: the interface configuration (can eventually be updated)
923  * @spec_timings: specific timings, when not fitting the ONFI specification
924  *
925  * If specific timings are provided, use them. Otherwise, retrieve supported
926  * timing modes from ONFI information.
927  */
928 int nand_choose_best_sdr_timings(struct nand_chip *chip,
929                                  struct nand_interface_config *iface,
930                                  struct nand_sdr_timings *spec_timings)
931 {
932         const struct nand_controller_ops *ops = chip->controller->ops;
933         int best_mode = 0, mode, ret = -EOPNOTSUPP;
934
935         iface->type = NAND_SDR_IFACE;
936
937         if (spec_timings) {
938                 iface->timings.sdr = *spec_timings;
939                 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
940
941                 /* Verify the controller supports the requested interface */
942                 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
943                                            iface);
944                 if (!ret) {
945                         chip->best_interface_config = iface;
946                         return ret;
947                 }
948
949                 /* Fallback to slower modes */
950                 best_mode = iface->timings.mode;
951         } else if (chip->parameters.onfi) {
952                 best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
953         }
954
955         for (mode = best_mode; mode >= 0; mode--) {
956                 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
957
958                 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
959                                            iface);
960                 if (!ret) {
961                         chip->best_interface_config = iface;
962                         break;
963                 }
964         }
965
966         return ret;
967 }
968
969 /**
970  * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
971  *                                  NAND controller and the NAND chip support
972  * @chip: the NAND chip
973  * @iface: the interface configuration (can eventually be updated)
974  * @spec_timings: specific timings, when not fitting the ONFI specification
975  *
976  * If specific timings are provided, use them. Otherwise, retrieve supported
977  * timing modes from ONFI information.
978  */
979 int nand_choose_best_nvddr_timings(struct nand_chip *chip,
980                                    struct nand_interface_config *iface,
981                                    struct nand_nvddr_timings *spec_timings)
982 {
983         const struct nand_controller_ops *ops = chip->controller->ops;
984         int best_mode = 0, mode, ret = -EOPNOTSUPP;
985
986         iface->type = NAND_NVDDR_IFACE;
987
988         if (spec_timings) {
989                 iface->timings.nvddr = *spec_timings;
990                 iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);
991
992                 /* Verify the controller supports the requested interface */
993                 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
994                                            iface);
995                 if (!ret) {
996                         chip->best_interface_config = iface;
997                         return ret;
998                 }
999
1000                 /* Fallback to slower modes */
1001                 best_mode = iface->timings.mode;
1002         } else if (chip->parameters.onfi) {
1003                 best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
1004         }
1005
1006         for (mode = best_mode; mode >= 0; mode--) {
1007                 onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);
1008
1009                 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
1010                                            iface);
1011                 if (!ret) {
1012                         chip->best_interface_config = iface;
1013                         break;
1014                 }
1015         }
1016
1017         return ret;
1018 }
1019
1020 /**
1021  * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
1022  *                            NAND controller and the NAND chip support
1023  * @chip: the NAND chip
1024  * @iface: the interface configuration (can eventually be updated)
1025  *
1026  * If specific timings are provided, use them. Otherwise, retrieve supported
1027  * timing modes from ONFI information.
1028  */
1029 static int nand_choose_best_timings(struct nand_chip *chip,
1030                                     struct nand_interface_config *iface)
1031 {
1032         int ret;
1033
1034         /* Try the fastest timings: NV-DDR */
1035         ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
1036         if (!ret)
1037                 return 0;
1038
1039         /* Fallback to SDR timings otherwise */
1040         return nand_choose_best_sdr_timings(chip, iface, NULL);
1041 }
1042
1043 /**
1044  * nand_choose_interface_config - find the best data interface and timings
1045  * @chip: The NAND chip
1046  *
1047  * Find the best data interface and NAND timings supported by the chip
1048  * and the driver. Eventually let the NAND manufacturer driver propose his own
1049  * set of timings.
1050  *
1051  * After this function nand_chip->interface_config is initialized with the best
1052  * timing mode available.
1053  *
1054  * Returns 0 for success or negative error code otherwise.
1055  */
1056 static int nand_choose_interface_config(struct nand_chip *chip)
1057 {
1058         struct nand_interface_config *iface;
1059         int ret;
1060
1061         if (!nand_controller_can_setup_interface(chip))
1062                 return 0;
1063
1064         iface = kzalloc(sizeof(*iface), GFP_KERNEL);
1065         if (!iface)
1066                 return -ENOMEM;
1067
1068         if (chip->ops.choose_interface_config)
1069                 ret = chip->ops.choose_interface_config(chip, iface);
1070         else
1071                 ret = nand_choose_best_timings(chip, iface);
1072
1073         if (ret)
1074                 kfree(iface);
1075
1076         return ret;
1077 }
1078
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
                                   unsigned int offset_in_page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        /* Make sure the offset is less than the actual page size. */
        if (offset_in_page > mtd->writesize + mtd->oobsize)
                return -EINVAL;

        /*
         * On small page NANDs, there's a dedicated command to access the OOB
         * area, and the column address is relative to the start of the OOB
         * area, not the start of the page. Adjust the address accordingly.
         */
        if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
                offset_in_page -= mtd->writesize;

        /*
         * The offset in page is expressed in bytes, if the NAND bus is 16-bit
         * wide, then it must be divided by 2.
         */
        if (chip->options & NAND_BUSWIDTH_16) {
                /* A 16-bit bus cannot address an odd byte boundary */
                if (WARN_ON(offset_in_page % 2))
                        return -EINVAL;

                offset_in_page /= 2;
        }

        /* Low byte of the (word-based on 16-bit chips) column address */
        addrs[0] = offset_in_page;

        /*
         * Small page NANDs use 1 cycle for the columns, while large page NANDs
         * need 2
         */
        if (mtd->writesize <= 512)
                return 1;

        addrs[1] = offset_in_page >> 8;

        return 2;
}
1132
/*
 * READ PAGE implementation for small page (<= 512 bytes) chips on
 * ->exec_op() capable controllers: one column cycle, and the opcode itself
 * selects which 256-byte half (READ0/READ1) or the OOB area (READOOB) is
 * addressed.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
                                     unsigned int offset_in_page, void *buf,
                                     unsigned int len)
{
        const struct nand_interface_config *conf =
                nand_get_interface_config(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        u8 addrs[4];
        struct nand_op_instr instrs[] = {
                NAND_OP_CMD(NAND_CMD_READ0, 0),
                NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
                NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
                                 NAND_COMMON_TIMING_NS(conf, tRR_min)),
                NAND_OP_DATA_IN(len, buf, 0),
        };
        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
        int ret;

        /* Drop the DATA_IN instruction if len is set to 0. */
        if (!len)
                op.ninstrs--;

        /* Pick the opcode matching the area targeted by @offset_in_page */
        if (offset_in_page >= mtd->writesize)
                instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
        else if (offset_in_page >= 256 &&
                 !(chip->options & NAND_BUSWIDTH_16))
                instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

        ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
        if (ret < 0)
                return ret;

        /* Row address: two cycles, plus a third one on large chips */
        addrs[1] = page;
        addrs[2] = page >> 8;

        if (chip->options & NAND_ROW_ADDR_3) {
                addrs[3] = page >> 16;
                instrs[1].ctx.addr.naddrs++;
        }

        return nand_exec_op(chip, &op);
}
1175
/*
 * READ PAGE implementation for large page chips on ->exec_op() capable
 * controllers: two column cycles plus the row cycles, terminated by the
 * READSTART confirm command.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
                                     unsigned int offset_in_page, void *buf,
                                     unsigned int len)
{
        const struct nand_interface_config *conf =
                nand_get_interface_config(chip);
        u8 addrs[5];
        struct nand_op_instr instrs[] = {
                NAND_OP_CMD(NAND_CMD_READ0, 0),
                NAND_OP_ADDR(4, addrs, 0),
                NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
                NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
                                 NAND_COMMON_TIMING_NS(conf, tRR_min)),
                NAND_OP_DATA_IN(len, buf, 0),
        };
        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
        int ret;

        /* Drop the DATA_IN instruction if len is set to 0. */
        if (!len)
                op.ninstrs--;

        /* addrs[0-1]: column cycles */
        ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
        if (ret < 0)
                return ret;

        /* Row address: two cycles, plus a third one on large chips */
        addrs[2] = page;
        addrs[3] = page >> 8;

        if (chip->options & NAND_ROW_ADDR_3) {
                addrs[4] = page >> 16;
                instrs[1].ctx.addr.naddrs++;
        }

        return nand_exec_op(chip, &op);
}
1212
1213 /**
1214  * nand_read_page_op - Do a READ PAGE operation
1215  * @chip: The NAND chip
1216  * @page: page to read
1217  * @offset_in_page: offset within the page
1218  * @buf: buffer used to store the data
1219  * @len: length of the buffer
1220  *
1221  * This function issues a READ PAGE operation.
1222  * This function does not select/unselect the CS line.
1223  *
1224  * Returns 0 on success, a negative error code otherwise.
1225  */
1226 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1227                       unsigned int offset_in_page, void *buf, unsigned int len)
1228 {
1229         struct mtd_info *mtd = nand_to_mtd(chip);
1230
1231         if (len && !buf)
1232                 return -EINVAL;
1233
1234         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1235                 return -EINVAL;
1236
1237         if (nand_has_exec_op(chip)) {
1238                 if (mtd->writesize > 512)
1239                         return nand_lp_exec_read_page_op(chip, page,
1240                                                          offset_in_page, buf,
1241                                                          len);
1242
1243                 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1244                                                  buf, len);
1245         }
1246
1247         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1248         if (len)
1249                 chip->legacy.read_buf(chip, buf, len);
1250
1251         return 0;
1252 }
1253 EXPORT_SYMBOL_GPL(nand_read_page_op);
1254
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
                            unsigned int len)
{
        unsigned int i;
        u8 *p = buf;

        if (len && !buf)
                return -EINVAL;

        if (nand_has_exec_op(chip)) {
                const struct nand_interface_config *conf =
                        nand_get_interface_config(chip);
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_PARAM, 0),
                        NAND_OP_ADDR(1, &page,
                                     NAND_COMMON_TIMING_NS(conf, tWB_max)),
                        NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
                                         NAND_COMMON_TIMING_NS(conf, tRR_min)),
                        /* Parameter page data is transferred in 8-bit mode */
                        NAND_OP_8BIT_DATA_IN(len, buf, 0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

                /* Drop the DATA_IN instruction if len is set to 0. */
                if (!len)
                        op.ninstrs--;

                return nand_exec_op(chip, &op);
        }

        /* Legacy path: issue the command, then read byte by byte */
        chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
        for (i = 0; i < len; i++)
                p[i] = chip->legacy.read_byte(chip);

        return 0;
}
1302
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
                               unsigned int offset_in_page, void *buf,
                               unsigned int len, bool force_8bit)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        if (len && !buf)
                return -EINVAL;

        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
                return -EINVAL;

        /* Small page NANDs do not support column change. */
        if (mtd->writesize <= 512)
                return -ENOTSUPP;

        if (nand_has_exec_op(chip)) {
                const struct nand_interface_config *conf =
                        nand_get_interface_config(chip);
                u8 addrs[2] = {};
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
                        NAND_OP_ADDR(2, addrs, 0),
                        NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
                                    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
                        NAND_OP_DATA_IN(len, buf, 0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
                int ret;

                /* Fill the two column cycles of the RNDOUT address */
                ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
                if (ret < 0)
                        return ret;

                /* Drop the DATA_IN instruction if len is set to 0. */
                if (!len)
                        op.ninstrs--;

                /* instrs[3] is the DATA_IN instruction declared above */
                instrs[3].ctx.data.force_8bit = force_8bit;

                return nand_exec_op(chip, &op);
        }

        /* Legacy path: RNDOUT then manual data transfer */
        chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
        if (len)
                chip->legacy.read_buf(chip, buf, len);

        return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1366
1367 /**
1368  * nand_read_oob_op - Do a READ OOB operation
1369  * @chip: The NAND chip
1370  * @page: page to read
1371  * @offset_in_oob: offset within the OOB area
1372  * @buf: buffer used to store the data
1373  * @len: length of the buffer
1374  *
1375  * This function issues a READ OOB operation.
1376  * This function does not select/unselect the CS line.
1377  *
1378  * Returns 0 on success, a negative error code otherwise.
1379  */
1380 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1381                      unsigned int offset_in_oob, void *buf, unsigned int len)
1382 {
1383         struct mtd_info *mtd = nand_to_mtd(chip);
1384
1385         if (len && !buf)
1386                 return -EINVAL;
1387
1388         if (offset_in_oob + len > mtd->oobsize)
1389                 return -EINVAL;
1390
1391         if (nand_has_exec_op(chip))
1392                 return nand_read_page_op(chip, page,
1393                                          mtd->writesize + offset_in_oob,
1394                                          buf, len);
1395
1396         chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1397         if (len)
1398                 chip->legacy.read_buf(chip, buf, len);
1399
1400         return 0;
1401 }
1402 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1403
/*
 * Core PROG PAGE helper for ->exec_op() capable controllers. When @prog is
 * false, only the SEQIN + address + data phases are emitted and the caller
 * is expected to terminate the operation (see nand_prog_page_end_op()).
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
                                  unsigned int offset_in_page, const void *buf,
                                  unsigned int len, bool prog)
{
        const struct nand_interface_config *conf =
                nand_get_interface_config(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        u8 addrs[5] = {};
        struct nand_op_instr instrs[] = {
                /*
                 * The first instruction will be dropped if we're dealing
                 * with a large page NAND and adjusted if we're dealing
                 * with a small page NAND and the page offset is > 255.
                 */
                NAND_OP_CMD(NAND_CMD_READ0, 0),
                NAND_OP_CMD(NAND_CMD_SEQIN, 0),
                NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
                NAND_OP_DATA_OUT(len, buf, 0),
                NAND_OP_CMD(NAND_CMD_PAGEPROG,
                            NAND_COMMON_TIMING_NS(conf, tWB_max)),
                NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
        };
        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
        int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

        if (naddrs < 0)
                return naddrs;

        /* Append the row cycles after the column cycles */
        addrs[naddrs++] = page;
        addrs[naddrs++] = page >> 8;
        if (chip->options & NAND_ROW_ADDR_3)
                addrs[naddrs++] = page >> 16;

        /* instrs[2] is the ADDR instruction declared with 0 cycles above */
        instrs[2].ctx.addr.naddrs = naddrs;

        /* Drop the last two instructions if we're not programming the page. */
        if (!prog) {
                op.ninstrs -= 2;
                /* Also drop the DATA_OUT instruction if empty. */
                if (!len)
                        op.ninstrs--;
        }

        if (mtd->writesize <= 512) {
                /*
                 * Small pages need some more tweaking: we have to adjust the
                 * first instruction depending on the page offset we're trying
                 * to access.
                 */
                if (offset_in_page >= mtd->writesize)
                        instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
                else if (offset_in_page >= 256 &&
                         !(chip->options & NAND_BUSWIDTH_16))
                        instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
        } else {
                /*
                 * Drop the first command if we're dealing with a large page
                 * NAND.
                 */
                op.instrs++;
                op.ninstrs--;
        }

        return nand_exec_op(chip, &op);
}
1469
1470 /**
1471  * nand_prog_page_begin_op - starts a PROG PAGE operation
1472  * @chip: The NAND chip
1473  * @page: page to write
1474  * @offset_in_page: offset within the page
1475  * @buf: buffer containing the data to write to the page
1476  * @len: length of the buffer
1477  *
1478  * This function issues the first half of a PROG PAGE operation.
1479  * This function does not select/unselect the CS line.
1480  *
1481  * Returns 0 on success, a negative error code otherwise.
1482  */
1483 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1484                             unsigned int offset_in_page, const void *buf,
1485                             unsigned int len)
1486 {
1487         struct mtd_info *mtd = nand_to_mtd(chip);
1488
1489         if (len && !buf)
1490                 return -EINVAL;
1491
1492         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1493                 return -EINVAL;
1494
1495         if (nand_has_exec_op(chip))
1496                 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1497                                               len, false);
1498
1499         chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1500
1501         if (buf)
1502                 chip->legacy.write_buf(chip, buf, len);
1503
1504         return 0;
1505 }
1506 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1507
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
        int ret;
        u8 status;

        if (nand_has_exec_op(chip)) {
                const struct nand_interface_config *conf =
                        nand_get_interface_config(chip);
                /* PAGEPROG confirm followed by a wait for tPROG */
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_PAGEPROG,
                                    NAND_COMMON_TIMING_NS(conf, tWB_max)),
                        NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
                                         0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

                ret = nand_exec_op(chip, &op);
                if (ret)
                        return ret;

                /* Read the chip status to learn the program result */
                ret = nand_status_op(chip, &status);
                if (ret)
                        return ret;
        } else {
                /* Legacy path: waitfunc() returns the status byte */
                chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
                ret = chip->legacy.waitfunc(chip);
                if (ret < 0)
                        return ret;

                status = ret;
        }

        if (status & NAND_STATUS_FAIL)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1555
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
                      unsigned int offset_in_page, const void *buf,
                      unsigned int len)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        u8 status;
        int ret;

        /* Unlike the begin/end split, a full program requires data */
        if (!len || !buf)
                return -EINVAL;

        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
                return -EINVAL;

        if (nand_has_exec_op(chip)) {
                /* prog=true: the helper terminates with PAGEPROG itself */
                ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
                                                len, true);
                if (ret)
                        return ret;

                /* Read the chip status to learn the program result */
                ret = nand_status_op(chip, &status);
                if (ret)
                        return ret;
        } else {
                /* Legacy path: SEQIN, data, PAGEPROG, then wait for status */
                chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
                                     page);
                chip->legacy.write_buf(chip, buf, len);
                chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
                ret = chip->legacy.waitfunc(chip);
                if (ret < 0)
                        return ret;

                status = ret;
        }

        if (status & NAND_STATUS_FAIL)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
1610
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
                                unsigned int offset_in_page,
                                const void *buf, unsigned int len,
                                bool force_8bit)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        if (len && !buf)
                return -EINVAL;

        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
                return -EINVAL;

        /* Small page NANDs do not support column change. */
        if (mtd->writesize <= 512)
                return -ENOTSUPP;

        if (nand_has_exec_op(chip)) {
                const struct nand_interface_config *conf =
                        nand_get_interface_config(chip);
                u8 addrs[2];
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_RNDIN, 0),
                        NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
                        NAND_OP_DATA_OUT(len, buf, 0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
                int ret;

                /* Fill the two column cycles of the RNDIN address */
                ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
                if (ret < 0)
                        return ret;

                /* instrs[2] is the DATA_OUT instruction declared above */
                instrs[2].ctx.data.force_8bit = force_8bit;

                /* Drop the DATA_OUT instruction if len is set to 0. */
                if (!len)
                        op.ninstrs--;

                return nand_exec_op(chip, &op);
        }

        /* Legacy path: RNDIN then manual data transfer */
        chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
        if (len)
                chip->legacy.write_buf(chip, buf, len);

        return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1673
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
                   unsigned int len)
{
        unsigned int i;
        u8 *id = buf, *ddrbuf = NULL;

        if (len && !buf)
                return -EINVAL;

        if (nand_has_exec_op(chip)) {
                const struct nand_interface_config *conf =
                        nand_get_interface_config(chip);
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_READID, 0),
                        NAND_OP_ADDR(1, &addr,
                                     NAND_COMMON_TIMING_NS(conf, tADL_min)),
                        NAND_OP_8BIT_DATA_IN(len, buf, 0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
                int ret;

                /* READ_ID data bytes are received twice in NV-DDR mode */
                if (len && nand_interface_is_nvddr(conf)) {
                        /* Read into a double-sized bounce buffer instead */
                        ddrbuf = kzalloc(len * 2, GFP_KERNEL);
                        if (!ddrbuf)
                                return -ENOMEM;

                        instrs[2].ctx.data.len *= 2;
                        instrs[2].ctx.data.buf.in = ddrbuf;
                }

                /* Drop the DATA_IN instruction if len is set to 0. */
                if (!len)
                        op.ninstrs--;

                ret = nand_exec_op(chip, &op);
                if (!ret && len && nand_interface_is_nvddr(conf)) {
                        /* De-interleave: keep one byte out of each pair */
                        for (i = 0; i < len; i++)
                                id[i] = ddrbuf[i * 2];
                }

                /* kfree(NULL) is a no-op on the SDR path */
                kfree(ddrbuf);

                return ret;
        }

        /* Legacy path: issue the command, then read byte by byte */
        chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

        for (i = 0; i < len; i++)
                id[i] = chip->legacy.read_byte(chip);

        return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1741
1742 /**
1743  * nand_status_op - Do a STATUS operation
1744  * @chip: The NAND chip
1745  * @status: out variable to store the NAND status
1746  *
1747  * This function sends a STATUS command and reads back the status returned by
1748  * the NAND.
1749  * This function does not select/unselect the CS line.
1750  *
1751  * Returns 0 on success, a negative error code otherwise.
1752  */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			/* Redirect the transfer to the 2-byte bounce buffer. */
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Drop the DATA_IN instruction when no status is requested. */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		/* On success, keep the first copy of the duplicated byte. */
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	/* Legacy path. */
	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
1789 EXPORT_SYMBOL_GPL(nand_status_op);
1790
1791 /**
1792  * nand_exit_status_op - Exit a STATUS operation
1793  * @chip: The NAND chip
1794  *
1795  * This function sends a READ0 command to cancel the effect of the STATUS
1796  * command to avoid reading only the status until a new read command is sent.
1797  *
1798  * This function does not select/unselect the CS line.
1799  *
1800  * Returns 0 on success, a negative error code otherwise.
1801  */
1802 int nand_exit_status_op(struct nand_chip *chip)
1803 {
1804         if (nand_has_exec_op(chip)) {
1805                 struct nand_op_instr instrs[] = {
1806                         NAND_OP_CMD(NAND_CMD_READ0, 0),
1807                 };
1808                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1809
1810                 return nand_exec_op(chip, &op);
1811         }
1812
1813         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1814
1815         return 0;
1816 }
1817
1818 /**
1819  * nand_erase_op - Do an erase operation
1820  * @chip: The NAND chip
1821  * @eraseblock: block to erase
1822  *
1823  * This function sends an ERASE command and waits for the NAND to be ready
1824  * before returning.
1825  * This function does not select/unselect the CS line.
1826  *
1827  * Returns 0 on success, a negative error code otherwise.
1828  */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Convert the eraseblock number into a page (row) address. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* Three row cycles available; only two sent by default. */
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Large chips need a third row-address cycle (addrs[2]). */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		/* waitfunc() returns the status byte or a negative errno. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	/* The chip reports erase failures through the status FAIL bit. */
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1876 EXPORT_SYMBOL_GPL(nand_erase_op);
1877
1878 /**
1879  * nand_set_features_op - Do a SET FEATURES operation
1880  * @chip: The NAND chip
1881  * @feature: feature id
1882  * @data: 4 bytes of data
1883  *
1884  * This function sends a SET FEATURES command and waits for the NAND to be
1885  * ready before returning.
1886  * This function does not select/unselect the CS line.
1887  *
1888  * Returns 0 on success, a negative error code otherwise.
1889  */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
									tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: write the 4 parameter bytes one at a time. */
	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* waitfunc() returns the status byte or a negative errno. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1927
1928 /**
1929  * nand_get_features_op - Do a GET FEATURES operation
1930  * @chip: The NAND chip
1931  * @feature: feature id
1932  * @data: 4 bytes of data
1933  *
1934  * This function sends a GET FEATURES command and waits for the NAND to be
1935  * ready before returning.
1936  * This function does not select/unselect the CS line.
1937  *
1938  * Returns 0 on success, a negative error code otherwise.
1939  */
1940 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1941                                 void *data)
1942 {
1943         u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
1944         int i;
1945
1946         if (nand_has_exec_op(chip)) {
1947                 const struct nand_interface_config *conf =
1948                         nand_get_interface_config(chip);
1949                 struct nand_op_instr instrs[] = {
1950                         NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1951                         NAND_OP_ADDR(1, &feature,
1952                                      NAND_COMMON_TIMING_NS(conf, tWB_max)),
1953                         NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
1954                                          NAND_COMMON_TIMING_NS(conf, tRR_min)),
1955                         NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1956                                              data, 0),
1957                 };
1958                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1959                 int ret;
1960
1961                 /* GET_FEATURE data bytes are received twice in NV-DDR mode */
1962                 if (nand_interface_is_nvddr(conf)) {
1963                         instrs[3].ctx.data.len *= 2;
1964                         instrs[3].ctx.data.buf.in = ddrbuf;
1965                 }
1966
1967                 ret = nand_exec_op(chip, &op);
1968                 if (nand_interface_is_nvddr(conf)) {
1969                         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
1970                                 params[i] = ddrbuf[i * 2];
1971                 }
1972
1973                 return ret;
1974         }
1975
1976         chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1977         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1978                 params[i] = chip->legacy.read_byte(chip);
1979
1980         return 0;
1981 }
1982
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			/*
			 * NOTE(review): timeout_ms/delay_ns are already
			 * expressed in ms/ns, yet they are passed through
			 * PSEC_TO_MSEC()/PSEC_TO_NSEC(), which convert from
			 * picoseconds — confirm the intended units against
			 * the NAND_OP_WAIT_RDY() definition.
			 */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
2004
2005 /**
2006  * nand_reset_op - Do a reset operation
2007  * @chip: The NAND chip
2008  *
2009  * This function sends a RESET command and waits for the NAND to be ready
2010  * before returning.
2011  * This function does not select/unselect the CS line.
2012  *
2013  * Returns 0 on success, a negative error code otherwise.
2014  */
2015 int nand_reset_op(struct nand_chip *chip)
2016 {
2017         if (nand_has_exec_op(chip)) {
2018                 const struct nand_interface_config *conf =
2019                         nand_get_interface_config(chip);
2020                 struct nand_op_instr instrs[] = {
2021                         NAND_OP_CMD(NAND_CMD_RESET,
2022                                     NAND_COMMON_TIMING_NS(conf, tWB_max)),
2023                         NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
2024                                          0),
2025                 };
2026                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2027
2028                 return nand_exec_op(chip, &op);
2029         }
2030
2031         chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
2032
2033         return 0;
2034 }
2035 EXPORT_SYMBOL_GPL(nand_reset_op);
2036
2037 /**
2038  * nand_read_data_op - Read data from the NAND
2039  * @chip: The NAND chip
2040  * @buf: buffer used to store the data
2041  * @len: length of the buffer
2042  * @force_8bit: force 8-bit bus access
2043  * @check_only: do not actually run the command, only checks if the
2044  *              controller driver supports it
2045  *
2046  * This function does a raw data read on the bus. Usually used after launching
2047  * another NAND operation like nand_read_page_op().
2048  * This function does not select/unselect the CS line.
2049  *
2050  * Returns 0 on success, a negative error code otherwise.
2051  */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	/* A buffer and a non-zero length are both mandatory. */
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Parameter payloads (ID, status, features, etc) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set and this also indicates that in
		 * case NV-DDR timings are being used the data will be received
		 * twice.
		 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			/* Bounce buffer for the duplicated byte stream. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		/* Only validate controller support, do not run the op. */
		if (check_only) {
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* De-duplicate: keep every other received byte. */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	/* Legacy path cannot check support; report it as available. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		/* Byte-per-byte access for parameter-style payloads. */
		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
2120 EXPORT_SYMBOL_GPL(nand_read_data_op);
2121
2122 /**
2123  * nand_write_data_op - Write data from the NAND
2124  * @chip: The NAND chip
2125  * @buf: buffer containing the data to send on the bus
2126  * @len: length of the buffer
2127  * @force_8bit: force 8-bit bus access
2128  *
2129  * This function does a raw data write on the bus. Usually used after launching
2130  * another NAND operation like nand_write_page_begin_op().
2131  * This function does not select/unselect the CS line.
2132  *
2133  * Returns 0 on success, a negative error code otherwise.
2134  */
2135 int nand_write_data_op(struct nand_chip *chip, const void *buf,
2136                        unsigned int len, bool force_8bit)
2137 {
2138         if (!len || !buf)
2139                 return -EINVAL;
2140
2141         if (nand_has_exec_op(chip)) {
2142                 struct nand_op_instr instrs[] = {
2143                         NAND_OP_DATA_OUT(len, buf, 0),
2144                 };
2145                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
2146
2147                 instrs[0].ctx.data.force_8bit = force_8bit;
2148
2149                 return nand_exec_op(chip, &op);
2150         }
2151
2152         if (force_8bit) {
2153                 const u8 *p = buf;
2154                 unsigned int i;
2155
2156                 for (i = 0; i < len; i++)
2157                         chip->legacy.write_byte(chip, p[i]);
2158         } else {
2159                 chip->legacy.write_buf(chip, buf, len);
2160         }
2161
2162         return 0;
2163 }
2164 EXPORT_SYMBOL_GPL(nand_write_data_op);
2165
2166 /**
2167  * struct nand_op_parser_ctx - Context used by the parser
2168  * @instrs: array of all the instructions that must be addressed
2169  * @ninstrs: length of the @instrs array
2170  * @subop: Sub-operation to be passed to the NAND controller
2171  *
2172  * This structure is used by the core to split NAND operations into
2173  * sub-operations that can be handled by the NAND controller.
2174  */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction array */
	unsigned int ninstrs;			/* entries in @instrs */
	struct nand_subop subop;		/* current sub-op window */
};
2180
2181 /**
2182  * nand_op_parser_must_split_instr - Checks if an instruction must be split
2183  * @pat: the parser pattern element that matches @instr
2184  * @instr: pointer to the instruction to check
2185  * @start_offset: this is an in/out parameter. If @instr has already been
2186  *                split, then @start_offset is the offset from which to start
2187  *                (either an address cycle or an offset in the data buffer).
2188  *                Conversely, if the function returns true (ie. instr must be
2189  *                split), this parameter is updated to point to the first
2190  *                data/address cycle that has not been taken care of.
2191  *
2192  * Some NAND controllers are limited and cannot send X address cycles with a
2193  * unique operation, or cannot read/write more than Y bytes at the same time.
2194  * In this case, split the instruction that does not fit in a single
2195  * controller-operation into two or more chunks.
2196  *
2197  * Returns true if the instruction must be split, false otherwise.
2198  * The @start_offset parameter is also updated to the offset at which the next
2199  * bundle of instruction must start (if an address or a data instruction).
2200  */
2201 static bool
2202 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2203                                 const struct nand_op_instr *instr,
2204                                 unsigned int *start_offset)
2205 {
2206         switch (pat->type) {
2207         case NAND_OP_ADDR_INSTR:
2208                 if (!pat->ctx.addr.maxcycles)
2209                         break;
2210
2211                 if (instr->ctx.addr.naddrs - *start_offset >
2212                     pat->ctx.addr.maxcycles) {
2213                         *start_offset += pat->ctx.addr.maxcycles;
2214                         return true;
2215                 }
2216                 break;
2217
2218         case NAND_OP_DATA_IN_INSTR:
2219         case NAND_OP_DATA_OUT_INSTR:
2220                 if (!pat->ctx.data.maxlen)
2221                         break;
2222
2223                 if (instr->ctx.data.len - *start_offset >
2224                     pat->ctx.data.maxlen) {
2225                         *start_offset += pat->ctx.data.maxlen;
2226                         return true;
2227                 }
2228                 break;
2229
2230         default:
2231                 break;
2232         }
2233
2234         return false;
2235 }
2236
2237 /**
2238  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2239  *                            remaining in the parser context
2240  * @pat: the pattern to test
2241  * @ctx: the parser context structure to match with the pattern @pat
2242  *
2243  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2244  * Returns true if this is the case, false ortherwise. When true is returned,
2245  * @ctx->subop is updated with the set of instructions to be passed to the
2246  * controller driver.
2247  */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Offset inside the first instruction (non-zero if it was split). */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	/* Non-zero when the last instruction must be resumed by a next subop. */
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2320
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Dump the whole operation, marking with a "->" prefix the instructions
 * that belong to the sub-operation currently being executed.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the marker prefix while inside the subop span. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2348
2349 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2350                                   const struct nand_op_parser_ctx *b)
2351 {
2352         if (a->subop.ninstrs < b->subop.ninstrs)
2353                 return -1;
2354         else if (a->subop.ninstrs > b->subop.ninstrs)
2355                 return 1;
2356
2357         if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2358                 return -1;
2359         else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2360                 return 1;
2361
2362         return 0;
2363 }
2364
2365 /**
2366  * nand_op_parser_exec_op - exec_op parser
2367  * @chip: the NAND chip
2368  * @parser: patterns description provided by the controller driver
2369  * @op: the NAND operation to address
2370  * @check_only: when true, the function only checks if @op can be handled but
2371  *              does not execute the operation
2372  *
2373  * Helper function designed to ease integration of NAND controller drivers that
2374  * only support a limited set of instruction sequences. The supported sequences
2375  * are described in @parser, and the framework takes care of splitting @op into
2376  * multiple sub-operations (if required) and pass them back to the ->exec()
2377  * callback of the matching pattern if @check_only is set to false.
2378  *
2379  * NAND controller drivers should call this function from their own ->exec_op()
2380  * implementation.
2381  *
2382  * Returns 0 on success, a negative error code otherwise. A failure can be
2383  * caused by an unsupported operation (none of the supported patterns is able
2384  * to handle the requested operation), or an error returned by one of the
2385  * matching pattern->exec() hook.
2386  */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Loop until every instruction of @op has been consumed. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/*
		 * Try every pattern and keep the one that consumes the
		 * largest chunk of the remaining instructions (greedy
		 * selection via nand_op_parser_cmp_ctx()).
		 */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * If the last instruction was split, it must be re-included
		 * in the next subop, starting at last_instr_end_off.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
2447 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2448
2449 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2450 {
2451         return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2452                          instr->type == NAND_OP_DATA_OUT_INSTR);
2453 }
2454
2455 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2456                                       unsigned int instr_idx)
2457 {
2458         return subop && instr_idx < subop->ninstrs;
2459 }
2460
2461 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2462                                              unsigned int instr_idx)
2463 {
2464         if (instr_idx)
2465                 return 0;
2466
2467         return subop->first_instr_start_off;
2468 }
2469
2470 /**
2471  * nand_subop_get_addr_start_off - Get the start offset in an address array
2472  * @subop: The entire sub-operation
2473  * @instr_idx: Index of the instruction inside the sub-operation
2474  *
2475  * During driver development, one could be tempted to directly use the
2476  * ->addr.addrs field of address instructions. This is wrong as address
2477  * instructions might be split.
2478  *
2479  * Given an address instruction, returns the offset of the first cycle to issue.
2480  */
2481 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2482                                            unsigned int instr_idx)
2483 {
2484         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2485                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2486                 return 0;
2487
2488         return nand_subop_get_start_off(subop, instr_idx);
2489 }
2490 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2491
2492 /**
2493  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2494  * @subop: The entire sub-operation
2495  * @instr_idx: Index of the instruction inside the sub-operation
2496  *
2497  * During driver development, one could be tempted to directly use the
2498  * ->addr->naddrs field of a data instruction. This is wrong as instructions
2499  * might be split.
2500  *
2501  * Given an address instruction, returns the number of address cycle to issue.
2502  */
2503 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2504                                          unsigned int instr_idx)
2505 {
2506         int start_off, end_off;
2507
2508         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2509                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2510                 return 0;
2511
2512         start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2513
2514         if (instr_idx == subop->ninstrs - 1 &&
2515             subop->last_instr_end_off)
2516                 end_off = subop->last_instr_end_off;
2517         else
2518                 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2519
2520         return end_off - start_off;
2521 }
2522 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2523
2524 /**
2525  * nand_subop_get_data_start_off - Get the start offset in a data array
2526  * @subop: The entire sub-operation
2527  * @instr_idx: Index of the instruction inside the sub-operation
2528  *
2529  * During driver development, one could be tempted to directly use the
2530  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2531  * instructions might be split.
2532  *
2533  * Given a data instruction, returns the offset to start from.
2534  */
2535 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2536                                            unsigned int instr_idx)
2537 {
2538         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2539                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2540                 return 0;
2541
2542         return nand_subop_get_start_off(subop, instr_idx);
2543 }
2544 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2545
2546 /**
2547  * nand_subop_get_data_len - Get the number of bytes to retrieve
2548  * @subop: The entire sub-operation
2549  * @instr_idx: Index of the instruction inside the sub-operation
2550  *
2551  * During driver development, one could be tempted to directly use the
2552  * ->data->len field of a data instruction. This is wrong as data instructions
2553  * might be split.
2554  *
2555  * Returns the length of the chunk of data to send/receive.
2556  */
2557 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2558                                      unsigned int instr_idx)
2559 {
2560         int start_off = 0, end_off;
2561
2562         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2563                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2564                 return 0;
2565
2566         start_off = nand_subop_get_data_start_off(subop, instr_idx);
2567
2568         if (instr_idx == subop->ninstrs - 1 &&
2569             subop->last_instr_end_off)
2570                 end_off = subop->last_instr_end_off;
2571         else
2572                 end_off = subop->instrs[instr_idx].ctx.data.len;
2573
2574         return end_off - start_off;
2575 }
2576 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2577
2578 /**
2579  * nand_reset - Reset and initialize a NAND device
2580  * @chip: The NAND chip
2581  * @chipnr: Internal die id
2582  *
2583  * Save the timings data structure, then apply SDR timings mode 0 (see
2584  * nand_reset_interface for details), do the reset operation, and apply
2585  * back the previous timings.
2586  *
2587  * Returns 0 on success, a negative error code otherwise.
2588  */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Save the current timings and fall back to timing mode 0. */
	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/* Re-apply the timings saved before the reset. */
	return nand_setup_interface(chip, chipnr);
}
EXPORT_SYMBOL_GPL(nand_reset);
2615
2616 /**
2617  * nand_get_features - wrapper to perform a GET_FEATURE
2618  * @chip: NAND chip info structure
2619  * @addr: feature address
2620  * @subfeature_param: the subfeature parameters, a four bytes array
2621  *
2622  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2623  * operation cannot be handled.
2624  */
2625 int nand_get_features(struct nand_chip *chip, int addr,
2626                       u8 *subfeature_param)
2627 {
2628         if (!nand_supports_get_features(chip, addr))
2629                 return -ENOTSUPP;
2630
2631         if (chip->legacy.get_features)
2632                 return chip->legacy.get_features(chip, addr, subfeature_param);
2633
2634         return nand_get_features_op(chip, addr, subfeature_param);
2635 }
2636
2637 /**
2638  * nand_set_features - wrapper to perform a SET_FEATURE
2639  * @chip: NAND chip info structure
2640  * @addr: feature address
2641  * @subfeature_param: the subfeature parameters, a four bytes array
2642  *
2643  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2644  * operation cannot be handled.
2645  */
2646 int nand_set_features(struct nand_chip *chip, int addr,
2647                       u8 *subfeature_param)
2648 {
2649         if (!nand_supports_set_features(chip, addr))
2650                 return -ENOTSUPP;
2651
2652         if (chip->legacy.set_features)
2653                 return chip->legacy.set_features(chip, addr, subfeature_param);
2654
2655         return nand_set_features_op(chip, addr, subfeature_param);
2656 }
2657
2658 /**
2659  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2660  * @buf: buffer to test
2661  * @len: buffer length
2662  * @bitflips_threshold: maximum number of bitflips
2663  *
2664  * Check if a buffer contains only 0xff, which means the underlying region
2665  * has been erased and is ready to be programmed.
2666  * The bitflips_threshold specify the maximum number of bitflips before
2667  * considering the region is not erased.
2668  * Note: The logic of this function has been extracted from the memweight
2669  * implementation, except that nand_check_erased_buf function exit before
2670  * testing the whole buffer if the number of bitflips exceed the
2671  * bitflips_threshold value.
2672  *
2673  * Returns a positive number of bitflips less than or equal to
2674  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2675  * threshold.
2676  */
/*
 * Implementation derived from memweight(): a byte-wise pass up to the first
 * long-aligned address, a long-wise pass over the aligned middle, then a
 * byte-wise pass over the remainder. Every pass bails out with -EBADMSG as
 * soon as the accumulated number of zero bits exceeds bitflips_threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Byte-wise scan until the cursor is aligned on a long boundary. */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		/* Every bit that is not 1 counts as a bitflip. */
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Long-wise scan of the aligned middle section. */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);
		/* Fast path: a fully erased word contributes no bitflips. */
		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Byte-wise scan of the trailing, smaller-than-long remainder. */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}
2711
2712 /**
2713  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2714  *                               0xff data
2715  * @data: data buffer to test
2716  * @datalen: data length
2717  * @ecc: ECC buffer
2718  * @ecclen: ECC length
2719  * @extraoob: extra OOB buffer
2720  * @extraooblen: extra OOB length
2721  * @bitflips_threshold: maximum number of bitflips
2722  *
2723  * Check if a data buffer and its associated ECC and OOB data contains only
2724  * 0xff pattern, which means the underlying region has been erased and is
2725  * ready to be programmed.
2726  * The bitflips_threshold specify the maximum number of bitflips before
2727  * considering the region as not erased.
2728  *
2729  * Note:
2730  * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2731  *    different from the NAND page size. When fixing bitflips, ECC engines will
2732  *    report the number of errors per chunk, and the NAND core infrastructure
2733  *    expect you to return the maximum number of bitflips for the whole page.
2734  *    This is why you should always use this function on a single chunk and
2735  *    not on the whole page. After checking each chunk you should update your
2736  *    max_bitflips value accordingly.
2737  * 2/ When checking for bitflips in erased pages you should not only check
2738  *    the payload data but also their associated ECC data, because a user might
2739  *    have programmed almost all bits to 1 but a few. In this case, we
2740  *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
2741  *    this case.
2742  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2743  *    data are protected by the ECC engine.
2744  *    It could also be used if you support subpages and want to attach some
2745  *    extra OOB data to an ECC chunk.
2746  *
2747  * Returns a positive number of bitflips less than or equal to
2748  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2749  * threshold. In case of success, the passed buffers are filled with 0xff.
2750  */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips, ecc_bitflips, extraoob_bitflips;

	/*
	 * Check the three regions in turn; each one only gets the bitflip
	 * budget left over by the previous ones.
	 */
	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen,
					     bitflips_threshold -
					     data_bitflips);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold -
						  data_bitflips - ecc_bitflips);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* The chunk counts as erased: scrub the flipped bits back to 0xff. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2788
2789 /**
2790  * nand_read_page_raw_notsupp - dummy read raw page function
2791  * @chip: nand chip info structure
2792  * @buf: buffer to store read data
2793  * @oob_required: caller requires OOB data read to chip->oob_poi
2794  * @page: page number to read
2795  *
2796  * Returns -ENOTSUPP unconditionally.
2797  */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	/* Stub for controllers that cannot perform raw page accesses. */
	return -ENOTSUPP;
}
2803
2804 /**
2805  * nand_read_page_raw - [INTERN] read raw page data without ecc
2806  * @chip: nand chip info structure
2807  * @buf: buffer to store read data
2808  * @oob_required: caller requires OOB data read to chip->oob_poi
2809  * @page: page number to read
2810  *
2811  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2812  */
2813 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2814                        int page)
2815 {
2816         struct mtd_info *mtd = nand_to_mtd(chip);
2817         int ret;
2818
2819         ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2820         if (ret)
2821                 return ret;
2822
2823         if (oob_required) {
2824                 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2825                                         false, false);
2826                 if (ret)
2827                         return ret;
2828         }
2829
2830         return 0;
2831 }
2832 EXPORT_SYMBOL(nand_read_page_raw);
2833
2834 /**
2835  * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2836  * @chip: NAND chip info structure
2837  * @buf: buffer to store read data
2838  * @oob_required: caller requires OOB data read to chip->oob_poi
2839  * @page: page number to read
2840  *
2841  * This is a raw page read, ie. without any error detection/correction.
2842  * Monolithic means we are requesting all the relevant data (main plus
2843  * eventually OOB) to be loaded in the NAND cache and sent over the
2844  * bus (from the NAND chip to the NAND controller) in a single
2845  * operation. This is an alternative to nand_read_page_raw(), which
2846  * first reads the main data, and if the OOB data is requested too,
2847  * then reads more data on the bus.
2848  */
2849 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
2850                                   int oob_required, int page)
2851 {
2852         struct mtd_info *mtd = nand_to_mtd(chip);
2853         unsigned int size = mtd->writesize;
2854         u8 *read_buf = buf;
2855         int ret;
2856
2857         if (oob_required) {
2858                 size += mtd->oobsize;
2859
2860                 if (buf != chip->data_buf)
2861                         read_buf = nand_get_data_buf(chip);
2862         }
2863
2864         ret = nand_read_page_op(chip, page, 0, read_buf, size);
2865         if (ret)
2866                 return ret;
2867
2868         if (buf != chip->data_buf)
2869                 memcpy(buf, read_buf, mtd->writesize);
2870
2871         return 0;
2872 }
2873 EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2874
2875 /**
2876  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2877  * @chip: nand chip info structure
2878  * @buf: buffer to store read data
2879  * @oob_required: caller requires OOB data read to chip->oob_poi
2880  * @page: page number to read
2881  *
2882  * We need a special oob layout and handling even when OOB isn't used.
2883  */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Walk the page step by step: each ECC step interleaves a chunk of
	 * main data with its (prepad + ECC + postpad) OOB bytes.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		/* Optional padding stored before the ECC bytes. */
		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		/* Optional padding stored after the ECC bytes. */
		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the interleaved layout. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2938
2939 /**
2940  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2941  * @chip: nand chip info structure
2942  * @buf: buffer to store read data
2943  * @oob_required: caller requires OOB data read to chip->oob_poi
2944  * @page: page number to read
2945  */
2946 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2947                                 int oob_required, int page)
2948 {
2949         struct mtd_info *mtd = nand_to_mtd(chip);
2950         int i, eccsize = chip->ecc.size, ret;
2951         int eccbytes = chip->ecc.bytes;
2952         int eccsteps = chip->ecc.steps;
2953         uint8_t *p = buf;
2954         uint8_t *ecc_calc = chip->ecc.calc_buf;
2955         uint8_t *ecc_code = chip->ecc.code_buf;
2956         unsigned int max_bitflips = 0;
2957
2958         chip->ecc.read_page_raw(chip, buf, 1, page);
2959
2960         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2961                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2962
2963         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2964                                          chip->ecc.total);
2965         if (ret)
2966                 return ret;
2967
2968         eccsteps = chip->ecc.steps;
2969         p = buf;
2970
2971         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2972                 int stat;
2973
2974                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2975                 if (stat < 0) {
2976                         mtd->ecc_stats.failed++;
2977                 } else {
2978                         mtd->ecc_stats.corrected += stat;
2979                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2980                 }
2981         }
2982         return max_bitflips;
2983 }
2984
2985 /**
2986  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2987  * @chip: nand chip info structure
2988  * @data_offs: offset of requested data within the page
2989  * @readlen: data length
2990  * @bufpoi: buffer to store read data
2991  * @page: page number to read
2992  */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Byte offset of the first relevant ECC chunk in the OOB layout. */
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: fetch the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Widen the read if either end is not buswidth-aligned. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull the stored ECC bytes for the read fragment out of the OOB. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each chunk of the fragment and collect statistics. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
3095
3096 /**
3097  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
3098  * @chip: nand chip info structure
3099  * @buf: buffer to store read data
3100  * @oob_required: caller requires OOB data read to chip->oob_poi
3101  * @page: page number to read
3102  *
3103  * Not for syndrome calculating ECC controllers which need a special oob layout.
3104  */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read each data chunk with the HW ECC engine armed for reads. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		/* Collect the ECC the engine computed for this chunk. */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Read the whole OOB area, which holds the stored ECC bytes. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each chunk, handling erased pages with bitflips. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
3166
3167 /**
3168  * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
3169  *                                  data read from OOB area
3170  * @chip: nand chip info structure
3171  * @buf: buffer to store read data
3172  * @oob_required: caller requires OOB data read to chip->oob_poi
3173  * @page: page number to read
3174  *
3175  * Hardware ECC for large page chips, which requires the ECC data to be
3176  * extracted from the OOB before the actual data is read.
3177  */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor to start of page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	/* Extract the stored ECC bytes before reading the main data. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		/* Correct using the ECC read from OOB; no calculated ECC. */
		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
3233
3234 /**
3235  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3236  * @chip: nand chip info structure
3237  * @buf: buffer to store read data
3238  * @oob_required: caller requires OOB data read to chip->oob_poi
3239  * @page: page number to read
3240  *
3241  * The hw generator calculates the error syndrome automatically. Therefore we
3242  * need a special oob layout and handling.
3243  */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Total OOB bytes consumed per step: prepad + ECC + postpad. */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		/* Optional padding stored before the ECC bytes. */
		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome-read mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		/* The ECC bytes just read serve as the read ECC (syndrome). */
		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		/* Optional padding stored after the ECC bytes. */
		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3325
3326 /**
3327  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3328  * @chip: NAND chip object
3329  * @oob: oob destination address
3330  * @ops: oob ops structure
3331  * @len: size of oob to transfer
3332  */
3333 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3334                                   struct mtd_oob_ops *ops, size_t len)
3335 {
3336         struct mtd_info *mtd = nand_to_mtd(chip);
3337         int ret;
3338
3339         switch (ops->mode) {
3340
3341         case MTD_OPS_PLACE_OOB:
3342         case MTD_OPS_RAW:
3343                 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3344                 return oob + len;
3345
3346         case MTD_OPS_AUTO_OOB:
3347                 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3348                                                   ops->ooboffs, len);
3349                 BUG_ON(ret);
3350                 return oob + len;
3351
3352         default:
3353                 BUG();
3354         }
3355         return NULL;
3356 }
3357
3358 /**
3359  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3360  * @chip: NAND chip object
3361  * @retry_mode: the retry mode to use
3362  *
3363  * Some vendors supply a special command to shift the Vt threshold, to be used
3364  * when there are too many bitflips in a page (i.e., ECC error). After setting
3365  * a new threshold, the host should retry reading the page.
3366  */
3367 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3368 {
3369         pr_debug("setting READ RETRY mode %d\n", retry_mode);
3370
3371         if (retry_mode >= chip->read_retries)
3372                 return -EINVAL;
3373
3374         if (!chip->ops.setup_read_retry)
3375                 return -EOPNOTSUPP;
3376
3377         return chip->ops.setup_read_retry(chip, retry_mode);
3378 }
3379
3380 static void nand_wait_readrdy(struct nand_chip *chip)
3381 {
3382         const struct nand_interface_config *conf;
3383
3384         if (!(chip->options & NAND_NEED_READRDY))
3385                 return;
3386
3387         conf = nand_get_interface_config(chip);
3388         WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
3389 }
3390
3391 /**
3392  * nand_do_read_ops - [INTERN] Read data with ECC
3393  * @chip: NAND chip object
3394  * @from: offset to read from
3395  * @ops: oob ops structure
3396  *
3397  * Internal function. Called with chip held.
3398  */
3399 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3400                             struct mtd_oob_ops *ops)
3401 {
3402         int chipnr, page, realpage, col, bytes, aligned, oob_required;
3403         struct mtd_info *mtd = nand_to_mtd(chip);
3404         int ret = 0;
3405         uint32_t readlen = ops->len;
3406         uint32_t oobreadlen = ops->ooblen;
3407         uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3408
3409         uint8_t *bufpoi, *oob, *buf;
3410         int use_bounce_buf;
3411         unsigned int max_bitflips = 0;
3412         int retry_mode = 0;
3413         bool ecc_fail = false;
3414
3415         /* Check if the region is secured */
3416         if (nand_region_is_secured(chip, from, readlen))
3417                 return -EIO;
3418
3419         chipnr = (int)(from >> chip->chip_shift);
3420         nand_select_target(chip, chipnr);
3421
3422         realpage = (int)(from >> chip->page_shift);
3423         page = realpage & chip->pagemask;
3424
3425         col = (int)(from & (mtd->writesize - 1));
3426
3427         buf = ops->datbuf;
3428         oob = ops->oobbuf;
3429         oob_required = oob ? 1 : 0;
3430
3431         while (1) {
3432                 struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
3433
3434                 bytes = min(mtd->writesize - col, readlen);
3435                 aligned = (bytes == mtd->writesize);
3436
3437                 if (!aligned)
3438                         use_bounce_buf = 1;
3439                 else if (chip->options & NAND_USES_DMA)
3440                         use_bounce_buf = !virt_addr_valid(buf) ||
3441                                          !IS_ALIGNED((unsigned long)buf,
3442                                                      chip->buf_align);
3443                 else
3444                         use_bounce_buf = 0;
3445
3446                 /* Is the current page in the buffer? */
3447                 if (realpage != chip->pagecache.page || oob) {
3448                         bufpoi = use_bounce_buf ? chip->data_buf : buf;
3449
3450                         if (use_bounce_buf && aligned)
3451                                 pr_debug("%s: using read bounce buffer for buf@%p\n",
3452                                                  __func__, buf);
3453
3454 read_retry:
3455                         /*
3456                          * Now read the page into the buffer.  Absent an error,
3457                          * the read methods return max bitflips per ecc step.
3458                          */
3459                         if (unlikely(ops->mode == MTD_OPS_RAW))
3460                                 ret = chip->ecc.read_page_raw(chip, bufpoi,
3461                                                               oob_required,
3462                                                               page);
3463                         else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3464                                  !oob)
3465                                 ret = chip->ecc.read_subpage(chip, col, bytes,
3466                                                              bufpoi, page);
3467                         else
3468                                 ret = chip->ecc.read_page(chip, bufpoi,
3469                                                           oob_required, page);
3470                         if (ret < 0) {
3471                                 if (use_bounce_buf)
3472                                         /* Invalidate page cache */
3473                                         chip->pagecache.page = -1;
3474                                 break;
3475                         }
3476
3477                         /*
3478                          * Copy back the data in the initial buffer when reading
3479                          * partial pages or when a bounce buffer is required.
3480                          */
3481                         if (use_bounce_buf) {
3482                                 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3483                                     !(mtd->ecc_stats.failed - ecc_stats.failed) &&
3484                                     (ops->mode != MTD_OPS_RAW)) {
3485                                         chip->pagecache.page = realpage;
3486                                         chip->pagecache.bitflips = ret;
3487                                 } else {
3488                                         /* Invalidate page cache */
3489                                         chip->pagecache.page = -1;
3490                                 }
3491                                 memcpy(buf, bufpoi + col, bytes);
3492                         }
3493
3494                         if (unlikely(oob)) {
3495                                 int toread = min(oobreadlen, max_oobsize);
3496
3497                                 if (toread) {
3498                                         oob = nand_transfer_oob(chip, oob, ops,
3499                                                                 toread);
3500                                         oobreadlen -= toread;
3501                                 }
3502                         }
3503
3504                         nand_wait_readrdy(chip);
3505
3506                         if (mtd->ecc_stats.failed - ecc_stats.failed) {
3507                                 if (retry_mode + 1 < chip->read_retries) {
3508                                         retry_mode++;
3509                                         ret = nand_setup_read_retry(chip,
3510                                                         retry_mode);
3511                                         if (ret < 0)
3512                                                 break;
3513
3514                                         /* Reset ecc_stats; retry */
3515                                         mtd->ecc_stats = ecc_stats;
3516                                         goto read_retry;
3517                                 } else {
3518                                         /* No more retry modes; real failure */
3519                                         ecc_fail = true;
3520                                 }
3521                         }
3522
3523                         buf += bytes;
3524                         max_bitflips = max_t(unsigned int, max_bitflips, ret);
3525                 } else {
3526                         memcpy(buf, chip->data_buf + col, bytes);
3527                         buf += bytes;
3528                         max_bitflips = max_t(unsigned int, max_bitflips,
3529                                              chip->pagecache.bitflips);
3530                 }
3531
3532                 readlen -= bytes;
3533
3534                 /* Reset to retry mode 0 */
3535                 if (retry_mode) {
3536                         ret = nand_setup_read_retry(chip, 0);
3537                         if (ret < 0)
3538                                 break;
3539                         retry_mode = 0;
3540                 }
3541
3542                 if (!readlen)
3543                         break;
3544
3545                 /* For subsequent reads align to page boundary */
3546                 col = 0;
3547                 /* Increment page address */
3548                 realpage++;
3549
3550                 page = realpage & chip->pagemask;
3551                 /* Check, if we cross a chip boundary */
3552                 if (!page) {
3553                         chipnr++;
3554                         nand_deselect_target(chip);
3555                         nand_select_target(chip, chipnr);
3556                 }
3557         }
3558         nand_deselect_target(chip);
3559
3560         ops->retlen = ops->len - (size_t) readlen;
3561         if (oob)
3562                 ops->oobretlen = ops->ooblen - oobreadlen;
3563
3564         if (ret < 0)
3565                 return ret;
3566
3567         if (ecc_fail)
3568                 return -EBADMSG;
3569
3570         return max_bitflips;
3571 }
3572
3573 /**
3574  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3575  * @chip: nand chip info structure
3576  * @page: page number to read
3577  */
3578 int nand_read_oob_std(struct nand_chip *chip, int page)
3579 {
3580         struct mtd_info *mtd = nand_to_mtd(chip);
3581
3582         return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3583 }
3584 EXPORT_SYMBOL(nand_read_oob_std);
3585
3586 /**
3587  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3588  *                          with syndromes
3589  * @chip: nand chip info structure
3590  * @page: page number to read
3591  */
3592 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3593 {
3594         struct mtd_info *mtd = nand_to_mtd(chip);
3595         int length = mtd->oobsize;
3596         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3597         int eccsize = chip->ecc.size;
3598         uint8_t *bufpoi = chip->oob_poi;
3599         int i, toread, sndrnd = 0, pos, ret;
3600
3601         ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3602         if (ret)
3603                 return ret;
3604
3605         for (i = 0; i < chip->ecc.steps; i++) {
3606                 if (sndrnd) {
3607                         int ret;
3608
3609                         pos = eccsize + i * (eccsize + chunk);
3610                         if (mtd->writesize > 512)
3611                                 ret = nand_change_read_column_op(chip, pos,
3612                                                                  NULL, 0,
3613                                                                  false);
3614                         else
3615                                 ret = nand_read_page_op(chip, page, pos, NULL,
3616                                                         0);
3617
3618                         if (ret)
3619                                 return ret;
3620                 } else
3621                         sndrnd = 1;
3622                 toread = min_t(int, length, chunk);
3623
3624                 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3625                 if (ret)
3626                         return ret;
3627
3628                 bufpoi += toread;
3629                 length -= toread;
3630         }
3631         if (length > 0) {
3632                 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3633                 if (ret)
3634                         return ret;
3635         }
3636
3637         return 0;
3638 }
3639
3640 /**
3641  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3642  * @chip: nand chip info structure
3643  * @page: page number to write
3644  */
3645 int nand_write_oob_std(struct nand_chip *chip, int page)
3646 {
3647         struct mtd_info *mtd = nand_to_mtd(chip);
3648
3649         return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3650                                  mtd->oobsize);
3651 }
3652 EXPORT_SYMBOL(nand_write_oob_std);
3653
3654 /**
3655  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3656  *                           with syndrome - only for large page flash
3657  * @chip: nand chip info structure
3658  * @page: page number to write
3659  */
3660 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3661 {
3662         struct mtd_info *mtd = nand_to_mtd(chip);
3663         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3664         int eccsize = chip->ecc.size, length = mtd->oobsize;
3665         int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3666         const uint8_t *bufpoi = chip->oob_poi;
3667
3668         /*
3669          * data-ecc-data-ecc ... ecc-oob
3670          * or
3671          * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3672          */
3673         if (!chip->ecc.prepad && !chip->ecc.postpad) {
3674                 pos = steps * (eccsize + chunk);
3675                 steps = 0;
3676         } else
3677                 pos = eccsize;
3678
3679         ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3680         if (ret)
3681                 return ret;
3682
3683         for (i = 0; i < steps; i++) {
3684                 if (sndcmd) {
3685                         if (mtd->writesize <= 512) {
3686                                 uint32_t fill = 0xFFFFFFFF;
3687
3688                                 len = eccsize;
3689                                 while (len > 0) {
3690                                         int num = min_t(int, len, 4);
3691
3692                                         ret = nand_write_data_op(chip, &fill,
3693                                                                  num, false);
3694                                         if (ret)
3695                                                 return ret;
3696
3697                                         len -= num;
3698                                 }
3699                         } else {
3700                                 pos = eccsize + i * (eccsize + chunk);
3701                                 ret = nand_change_write_column_op(chip, pos,
3702                                                                   NULL, 0,
3703                                                                   false);
3704                                 if (ret)
3705                                         return ret;
3706                         }
3707                 } else
3708                         sndcmd = 1;
3709                 len = min_t(int, length, chunk);
3710
3711                 ret = nand_write_data_op(chip, bufpoi, len, false);
3712                 if (ret)
3713                         return ret;
3714
3715                 bufpoi += len;
3716                 length -= len;
3717         }
3718         if (length > 0) {
3719                 ret = nand_write_data_op(chip, bufpoi, length, false);
3720                 if (ret)
3721                         return ret;
3722         }
3723
3724         return nand_prog_page_end_op(chip);
3725 }
3726
3727 /**
3728  * nand_do_read_oob - [INTERN] NAND read out-of-band
3729  * @chip: NAND chip object
3730  * @from: offset to read from
3731  * @ops: oob operations description structure
3732  *
3733  * NAND read out-of-band data from the spare area.
3734  */
3735 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3736                             struct mtd_oob_ops *ops)
3737 {
3738         struct mtd_info *mtd = nand_to_mtd(chip);
3739         unsigned int max_bitflips = 0;
3740         int page, realpage, chipnr;
3741         struct mtd_ecc_stats stats;
3742         int readlen = ops->ooblen;
3743         int len;
3744         uint8_t *buf = ops->oobbuf;
3745         int ret = 0;
3746
3747         pr_debug("%s: from = 0x%08Lx, len = %i\n",
3748                         __func__, (unsigned long long)from, readlen);
3749
3750         /* Check if the region is secured */
3751         if (nand_region_is_secured(chip, from, readlen))
3752                 return -EIO;
3753
3754         stats = mtd->ecc_stats;
3755
3756         len = mtd_oobavail(mtd, ops);
3757
3758         chipnr = (int)(from >> chip->chip_shift);
3759         nand_select_target(chip, chipnr);
3760
3761         /* Shift to get page */
3762         realpage = (int)(from >> chip->page_shift);
3763         page = realpage & chip->pagemask;
3764
3765         while (1) {
3766                 if (ops->mode == MTD_OPS_RAW)
3767                         ret = chip->ecc.read_oob_raw(chip, page);
3768                 else
3769                         ret = chip->ecc.read_oob(chip, page);
3770
3771                 if (ret < 0)
3772                         break;
3773
3774                 len = min(len, readlen);
3775                 buf = nand_transfer_oob(chip, buf, ops, len);
3776
3777                 nand_wait_readrdy(chip);
3778
3779                 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3780
3781                 readlen -= len;
3782                 if (!readlen)
3783                         break;
3784
3785                 /* Increment page address */
3786                 realpage++;
3787
3788                 page = realpage & chip->pagemask;
3789                 /* Check, if we cross a chip boundary */
3790                 if (!page) {
3791                         chipnr++;
3792                         nand_deselect_target(chip);
3793                         nand_select_target(chip, chipnr);
3794                 }
3795         }
3796         nand_deselect_target(chip);
3797
3798         ops->oobretlen = ops->ooblen - readlen;
3799
3800         if (ret < 0)
3801                 return ret;
3802
3803         if (mtd->ecc_stats.failed - stats.failed)
3804                 return -EBADMSG;
3805
3806         return max_bitflips;
3807 }
3808
3809 /**
3810  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3811  * @mtd: MTD device structure
3812  * @from: offset to read from
3813  * @ops: oob operation description structure
3814  *
3815  * NAND read data and/or out-of-band data.
3816  */
3817 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3818                          struct mtd_oob_ops *ops)
3819 {
3820         struct nand_chip *chip = mtd_to_nand(mtd);
3821         int ret;
3822
3823         ops->retlen = 0;
3824
3825         if (ops->mode != MTD_OPS_PLACE_OOB &&
3826             ops->mode != MTD_OPS_AUTO_OOB &&
3827             ops->mode != MTD_OPS_RAW)
3828                 return -ENOTSUPP;
3829
3830         nand_get_device(chip);
3831
3832         if (!ops->datbuf)
3833                 ret = nand_do_read_oob(chip, from, ops);
3834         else
3835                 ret = nand_do_read_ops(chip, from, ops);
3836
3837         nand_release_device(chip);
3838         return ret;
3839 }
3840
3841 /**
3842  * nand_write_page_raw_notsupp - dummy raw page write function
3843  * @chip: nand chip info structure
3844  * @buf: data buffer
3845  * @oob_required: must write chip->oob_poi to OOB
3846  * @page: page number to write
3847  *
3848  * Returns -ENOTSUPP unconditionally.
3849  */
3850 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3851                                 int oob_required, int page)
3852 {
3853         return -ENOTSUPP;
3854 }
3855
3856 /**
3857  * nand_write_page_raw - [INTERN] raw page write function
3858  * @chip: nand chip info structure
3859  * @buf: data buffer
3860  * @oob_required: must write chip->oob_poi to OOB
3861  * @page: page number to write
3862  *
3863  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3864  */
3865 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3866                         int oob_required, int page)
3867 {
3868         struct mtd_info *mtd = nand_to_mtd(chip);
3869         int ret;
3870
3871         ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3872         if (ret)
3873                 return ret;
3874
3875         if (oob_required) {
3876                 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3877                                          false);
3878                 if (ret)
3879                         return ret;
3880         }
3881
3882         return nand_prog_page_end_op(chip);
3883 }
3884 EXPORT_SYMBOL(nand_write_page_raw);
3885
3886 /**
3887  * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3888  * @chip: NAND chip info structure
3889  * @buf: data buffer to write
3890  * @oob_required: must write chip->oob_poi to OOB
3891  * @page: page number to write
3892  *
3893  * This is a raw page write, ie. without any error detection/correction.
3894  * Monolithic means we are requesting all the relevant data (main plus
3895  * eventually OOB) to be sent over the bus and effectively programmed
3896  * into the NAND chip arrays in a single operation. This is an
3897  * alternative to nand_write_page_raw(), which first sends the main
3898  * data, then eventually send the OOB data by latching more data
3899  * cycles on the NAND bus, and finally sends the program command to
3900  * synchronyze the NAND chip cache.
3901  */
3902 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3903                                    int oob_required, int page)
3904 {
3905         struct mtd_info *mtd = nand_to_mtd(chip);
3906         unsigned int size = mtd->writesize;
3907         u8 *write_buf = (u8 *)buf;
3908
3909         if (oob_required) {
3910                 size += mtd->oobsize;
3911
3912                 if (buf != chip->data_buf) {
3913                         write_buf = nand_get_data_buf(chip);
3914                         memcpy(write_buf, buf, mtd->writesize);
3915                 }
3916         }
3917
3918         return nand_prog_page_op(chip, page, 0, write_buf, size);
3919 }
3920 EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3921
3922 /**
3923  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3924  * @chip: nand chip info structure
3925  * @buf: data buffer
3926  * @oob_required: must write chip->oob_poi to OOB
3927  * @page: page number to write
3928  *
3929  * We need a special oob layout and handling even when ECC isn't checked.
3930  */
3931 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3932                                         const uint8_t *buf, int oob_required,
3933                                         int page)
3934 {
3935         struct mtd_info *mtd = nand_to_mtd(chip);
3936         int eccsize = chip->ecc.size;
3937         int eccbytes = chip->ecc.bytes;
3938         uint8_t *oob = chip->oob_poi;
3939         int steps, size, ret;
3940
3941         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3942         if (ret)
3943                 return ret;
3944
3945         for (steps = chip->ecc.steps; steps > 0; steps--) {
3946                 ret = nand_write_data_op(chip, buf, eccsize, false);
3947                 if (ret)
3948                         return ret;
3949
3950                 buf += eccsize;
3951
3952                 if (chip->ecc.prepad) {
3953                         ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3954                                                  false);
3955                         if (ret)
3956                                 return ret;
3957
3958                         oob += chip->ecc.prepad;
3959                 }
3960
3961                 ret = nand_write_data_op(chip, oob, eccbytes, false);
3962                 if (ret)
3963                         return ret;
3964
3965                 oob += eccbytes;
3966
3967                 if (chip->ecc.postpad) {
3968                         ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3969                                                  false);
3970                         if (ret)
3971                                 return ret;
3972
3973                         oob += chip->ecc.postpad;
3974                 }
3975         }
3976
3977         size = mtd->oobsize - (oob - chip->oob_poi);
3978         if (size) {
3979                 ret = nand_write_data_op(chip, oob, size, false);
3980                 if (ret)
3981                         return ret;
3982         }
3983
3984         return nand_prog_page_end_op(chip);
3985 }
3986 /**
3987  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3988  * @chip: nand chip info structure
3989  * @buf: data buffer
3990  * @oob_required: must write chip->oob_poi to OOB
3991  * @page: page number to write
3992  */
3993 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3994                                  int oob_required, int page)
3995 {
3996         struct mtd_info *mtd = nand_to_mtd(chip);
3997         int i, eccsize = chip->ecc.size, ret;
3998         int eccbytes = chip->ecc.bytes;
3999         int eccsteps = chip->ecc.steps;
4000         uint8_t *ecc_calc = chip->ecc.calc_buf;
4001         const uint8_t *p = buf;
4002
4003         /* Software ECC calculation */
4004         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
4005                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
4006
4007         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4008                                          chip->ecc.total);
4009         if (ret)
4010                 return ret;
4011
4012         return chip->ecc.write_page_raw(chip, buf, 1, page);
4013 }
4014
4015 /**
4016  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
4017  * @chip: nand chip info structure
4018  * @buf: data buffer
4019  * @oob_required: must write chip->oob_poi to OOB
4020  * @page: page number to write
4021  */
4022 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
4023                                  int oob_required, int page)
4024 {
4025         struct mtd_info *mtd = nand_to_mtd(chip);
4026         int i, eccsize = chip->ecc.size, ret;
4027         int eccbytes = chip->ecc.bytes;
4028         int eccsteps = chip->ecc.steps;
4029         uint8_t *ecc_calc = chip->ecc.calc_buf;
4030         const uint8_t *p = buf;
4031
4032         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4033         if (ret)
4034                 return ret;
4035
4036         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4037                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4038
4039                 ret = nand_write_data_op(chip, p, eccsize, false);
4040                 if (ret)
4041                         return ret;
4042
4043                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
4044         }
4045
4046         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4047                                          chip->ecc.total);
4048         if (ret)
4049                 return ret;
4050
4051         ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4052         if (ret)
4053                 return ret;
4054
4055         return nand_prog_page_end_op(chip);
4056 }
4057
4058
4059 /**
4060  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
4061  * @chip:       nand chip info structure
4062  * @offset:     column address of subpage within the page
4063  * @data_len:   data length
4064  * @buf:        data buffer
4065  * @oob_required: must write chip->oob_poi to OOB
4066  * @page: page number to write
4067  */
4068 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
4069                                     uint32_t data_len, const uint8_t *buf,
4070                                     int oob_required, int page)
4071 {
4072         struct mtd_info *mtd = nand_to_mtd(chip);
4073         uint8_t *oob_buf  = chip->oob_poi;
4074         uint8_t *ecc_calc = chip->ecc.calc_buf;
4075         int ecc_size      = chip->ecc.size;
4076         int ecc_bytes     = chip->ecc.bytes;
4077         int ecc_steps     = chip->ecc.steps;
4078         uint32_t start_step = offset / ecc_size;
4079         uint32_t end_step   = (offset + data_len - 1) / ecc_size;
4080         int oob_bytes       = mtd->oobsize / ecc_steps;
4081         int step, ret;
4082
4083         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4084         if (ret)
4085                 return ret;
4086
4087         for (step = 0; step < ecc_steps; step++) {
4088                 /* configure controller for WRITE access */
4089                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
4090
4091                 /* write data (untouched subpages already masked by 0xFF) */
4092                 ret = nand_write_data_op(chip, buf, ecc_size, false);
4093                 if (ret)
4094                         return ret;
4095
4096                 /* mask ECC of un-touched subpages by padding 0xFF */
4097                 if ((step < start_step) || (step > end_step))
4098                         memset(ecc_calc, 0xff, ecc_bytes);
4099                 else
4100                         chip->ecc.calculate(chip, buf, ecc_calc);
4101
4102                 /* mask OOB of un-touched subpages by padding 0xFF */
4103                 /* if oob_required, preserve OOB metadata of written subpage */
4104                 if (!oob_required || (step < start_step) || (step > end_step))
4105                         memset(oob_buf, 0xff, oob_bytes);
4106
4107                 buf += ecc_size;
4108                 ecc_calc += ecc_bytes;
4109                 oob_buf  += oob_bytes;
4110         }
4111
4112         /* copy calculated ECC for whole page to chip->buffer->oob */
4113         /* this include masked-value(0xFF) for unwritten subpages */
4114         ecc_calc = chip->ecc.calc_buf;
4115         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4116                                          chip->ecc.total);
4117         if (ret)
4118                 return ret;
4119
4120         /* write OOB buffer to NAND device */
4121         ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4122         if (ret)
4123                 return ret;
4124
4125         return nand_prog_page_end_op(chip);
4126 }
4127
4128
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Per ECC step the on-flash layout is: data, optional prepad OOB
	 * bytes, the ECC bytes produced by the hw generator, and optional
	 * postpad OOB bytes.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the hardware ECC engine for a write access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Fetch the syndrome computed for the data just written */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
4198
4199 /**
4200  * nand_write_page - write one page
4201  * @chip: NAND chip descriptor
4202  * @offset: address offset within the page
4203  * @data_len: length of actual data to be written
4204  * @buf: the data to write
4205  * @oob_required: must write chip->oob_poi to OOB
4206  * @page: page number to write
4207  * @raw: use _raw version of write_page
4208  */
4209 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
4210                            int data_len, const uint8_t *buf, int oob_required,
4211                            int page, int raw)
4212 {
4213         struct mtd_info *mtd = nand_to_mtd(chip);
4214         int status, subpage;
4215
4216         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4217                 chip->ecc.write_subpage)
4218                 subpage = offset || (data_len < mtd->writesize);
4219         else
4220                 subpage = 0;
4221
4222         if (unlikely(raw))
4223                 status = chip->ecc.write_page_raw(chip, buf, oob_required,
4224                                                   page);
4225         else if (subpage)
4226                 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
4227                                                  oob_required, page);
4228         else
4229                 status = chip->ecc.write_page(chip, buf, oob_required, page);
4230
4231         if (status < 0)
4232                 return status;
4233
4234         return 0;
4235 }
4236
4237 #define NOTALIGNED(x)   ((x & (chip->subpagesize - 1)) != 0)
4238
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, writelen))
		return -EIO;

	/* Byte offset within the first page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial-page writes always use the bounce buffer so the
		 * untouched part of the page can be padded with 0xff. DMA
		 * controllers also need it when the caller's buffer is not
		 * virt-addr-valid or not aligned to chip->buf_align.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages are always written from their start */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4371
4372 /**
4373  * panic_nand_write - [MTD Interface] NAND write with ECC
4374  * @mtd: MTD device structure
4375  * @to: offset to write to
4376  * @len: number of bytes to write
4377  * @retlen: pointer to variable to store the number of written bytes
4378  * @buf: the data to write
4379  *
4380  * NAND write with ECC. Used when performing writes in interrupt context, this
4381  * may for example be called by mtdoops when writing an oops while in panic.
4382  */
4383 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4384                             size_t *retlen, const uint8_t *buf)
4385 {
4386         struct nand_chip *chip = mtd_to_nand(mtd);
4387         int chipnr = (int)(to >> chip->chip_shift);
4388         struct mtd_oob_ops ops;
4389         int ret;
4390
4391         nand_select_target(chip, chipnr);
4392
4393         /* Wait for the device to get ready */
4394         panic_nand_wait(chip, 400);
4395
4396         memset(&ops, 0, sizeof(ops));
4397         ops.len = len;
4398         ops.datbuf = (uint8_t *)buf;
4399         ops.mode = MTD_OPS_PLACE_OOB;
4400
4401         ret = nand_do_write_ops(chip, to, &ops);
4402
4403         *retlen = ops.retlen;
4404         return ret;
4405 }
4406
4407 /**
4408  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4409  * @mtd: MTD device structure
4410  * @to: offset to write to
4411  * @ops: oob operation description structure
4412  */
4413 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4414                           struct mtd_oob_ops *ops)
4415 {
4416         struct nand_chip *chip = mtd_to_nand(mtd);
4417         int ret = 0;
4418
4419         ops->retlen = 0;
4420
4421         nand_get_device(chip);
4422
4423         switch (ops->mode) {
4424         case MTD_OPS_PLACE_OOB:
4425         case MTD_OPS_AUTO_OOB:
4426         case MTD_OPS_RAW:
4427                 break;
4428
4429         default:
4430                 goto out;
4431         }
4432
4433         if (!ops->datbuf)
4434                 ret = nand_do_write_oob(chip, to, ops);
4435         else
4436                 ret = nand_do_write_ops(chip, to, ops);
4437
4438 out:
4439         nand_release_device(chip);
4440         return ret;
4441 }
4442
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks, disallowing the BBT area.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return nand_erase_nand(chip, instr, 0);
}
4454
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* The erase op takes a block index local to the selected die */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4554
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/*
	 * Taking the device lock and immediately dropping it waits out any
	 * operation currently in flight.
	 */
	nand_get_device(chip);
	nand_release_device(chip);
}
4572
4573 /**
4574  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4575  * @mtd: MTD device structure
4576  * @offs: offset relative to mtd start
4577  */
4578 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4579 {
4580         struct nand_chip *chip = mtd_to_nand(mtd);
4581         int chipnr = (int)(offs >> chip->chip_shift);
4582         int ret;
4583
4584         /* Select the NAND device */
4585         nand_get_device(chip);
4586
4587         nand_select_target(chip, chipnr);
4588
4589         ret = nand_block_checkbad(chip, offs, 0);
4590
4591         nand_deselect_target(chip);
4592         nand_release_device(chip);
4593
4594         return ret;
4595 }
4596
4597 /**
4598  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4599  * @mtd: MTD device structure
4600  * @ofs: offset relative to mtd start
4601  */
4602 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4603 {
4604         int ret;
4605
4606         ret = nand_block_isbad(mtd, ofs);
4607         if (ret) {
4608                 /* If it was bad already, return success and do nothing */
4609                 if (ret > 0)
4610                         return 0;
4611                 return ret;
4612         }
4613
4614         return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4615 }
4616
4617 /**
4618  * nand_suspend - [MTD Interface] Suspend the NAND flash
4619  * @mtd: MTD device structure
4620  *
4621  * Returns 0 for success or negative error code otherwise.
4622  */
4623 static int nand_suspend(struct mtd_info *mtd)
4624 {
4625         struct nand_chip *chip = mtd_to_nand(mtd);
4626         int ret = 0;
4627
4628         mutex_lock(&chip->lock);
4629         if (chip->ops.suspend)
4630                 ret = chip->ops.suspend(chip);
4631         if (!ret)
4632                 chip->suspended = 1;
4633         mutex_unlock(&chip->lock);
4634
4635         return ret;
4636 }
4637
4638 /**
4639  * nand_resume - [MTD Interface] Resume the NAND flash
4640  * @mtd: MTD device structure
4641  */
4642 static void nand_resume(struct mtd_info *mtd)
4643 {
4644         struct nand_chip *chip = mtd_to_nand(mtd);
4645
4646         mutex_lock(&chip->lock);
4647         if (chip->suspended) {
4648                 if (chip->ops.resume)
4649                         chip->ops.resume(chip);
4650                 chip->suspended = 0;
4651         } else {
4652                 pr_err("%s called for a chip which is not in suspended state\n",
4653                         __func__);
4654         }
4655         mutex_unlock(&chip->lock);
4656
4657         wake_up_all(&chip->resume_wq);
4658 }
4659
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Suspending drains pending work and blocks new requests */
	nand_suspend(mtd);
}
4669
4670 /**
4671  * nand_lock - [MTD Interface] Lock the NAND flash
4672  * @mtd: MTD device structure
4673  * @ofs: offset byte address
4674  * @len: number of bytes to lock (must be a multiple of block/page size)
4675  */
4676 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4677 {
4678         struct nand_chip *chip = mtd_to_nand(mtd);
4679
4680         if (!chip->ops.lock_area)
4681                 return -ENOTSUPP;
4682
4683         return chip->ops.lock_area(chip, ofs, len);
4684 }
4685
4686 /**
4687  * nand_unlock - [MTD Interface] Unlock the NAND flash
4688  * @mtd: MTD device structure
4689  * @ofs: offset byte address
4690  * @len: number of bytes to unlock (must be a multiple of block/page size)
4691  */
4692 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4693 {
4694         struct nand_chip *chip = mtd_to_nand(mtd);
4695
4696         if (!chip->ops.unlock_area)
4697                 return -ENOTSUPP;
4698
4699         return chip->ops.unlock_area(chip, ofs, len);
4700 }
4701
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	/* Install default legacy hooks */
	nand_legacy_set_defaults(chip);

	/* No driver-imposed DMA alignment: accept byte-aligned buffers */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4716
4717 /* Sanitize ONFI strings so we can safely print them */
4718 void sanitize_string(uint8_t *s, size_t len)
4719 {
4720         ssize_t i;
4721
4722         /* Null terminate */
4723         s[len - 1] = 0;
4724
4725         /* Remove non printable chars */
4726         for (i = 0; i < len - 1; i++) {
4727                 if (s[i] < ' ' || s[i] > 127)
4728                         s[i] = '?';
4729         }
4730
4731         /* Remove trailing spaces */
4732         strim(s);
4733 }
4734
4735 /*
4736  * nand_id_has_period - Check if an ID string has a given wraparound period
4737  * @id_data: the ID string
4738  * @arrlen: the length of the @id_data array
4739  * @period: the period of repitition
4740  *
4741  * Check if an ID string is repeated within a given sequence of bytes at
4742  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4743  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4744  * if the repetition has a period of @period; otherwise, returns zero.
4745  */
4746 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4747 {
4748         int i, j;
4749         for (i = 0; i < period; i++)
4750                 for (j = i + period; j < arrlen; j += period)
4751                         if (id_data[i] != id_data[j])
4752                                 return 0;
4753         return 1;
4754 }
4755
4756 /*
4757  * nand_id_len - Get the length of an ID string returned by CMD_READID
4758  * @id_data: the ID string
4759  * @arrlen: the length of the @id_data array
4760
4761  * Returns the length of the ID string, according to known wraparound/trailing
4762  * zero patterns. If no pattern exists, returns the length of the array.
4763  */
4764 static int nand_id_len(u8 *id_data, int arrlen)
4765 {
4766         int last_nonzero, period;
4767
4768         /* Find last non-zero byte */
4769         for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4770                 if (id_data[last_nonzero])
4771                         break;
4772
4773         /* All zeros */
4774         if (last_nonzero < 0)
4775                 return 0;
4776
4777         /* Calculate wraparound period */
4778         for (period = 1; period < arrlen; period++)
4779                 if (nand_id_has_period(id_data, arrlen, period))
4780                         break;
4781
4782         /* There's a repeated pattern */
4783         if (period < arrlen)
4784                 return period;
4785
4786         /* There are trailing zeros */
4787         if (last_nonzero < arrlen - 1)
4788                 return last_nonzero + 1;
4789
4790         /* No pattern detected */
4791         return arrlen;
4792 }
4793
4794 /* Extract the bits of per cell from the 3rd byte of the extended ID */
4795 static int nand_get_bits_per_cell(u8 cellinfo)
4796 {
4797         int bits;
4798
4799         bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4800         bits >>= NAND_CI_CELLTYPE_SHIFT;
4801         return bits + 1;
4802 }
4803
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: bits 0-1 encode 1KiB << n */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 bytes per 512 bytes of page */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4841
4842 /*
4843  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4844  * decodes a matching ID table entry and assigns the MTD size parameters for
4845  * the chip.
4846  */
4847 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4848 {
4849         struct mtd_info *mtd = nand_to_mtd(chip);
4850         struct nand_memory_organization *memorg;
4851
4852         memorg = nanddev_get_memorg(&chip->base);
4853
4854         memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4855         mtd->erasesize = type->erasesize;
4856         memorg->pagesize = type->pagesize;
4857         mtd->writesize = memorg->pagesize;
4858         memorg->oobsize = memorg->pagesize / 32;
4859         mtd->oobsize = memorg->oobsize;
4860
4861         /* All legacy ID NAND are small-page, SLC */
4862         memorg->bits_per_cell = 1;
4863 }
4864
4865 /*
4866  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4867  * heuristic patterns using various detected parameters (e.g., manufacturer,
4868  * page size, cell-type information).
4869  */
4870 static void nand_decode_bbm_options(struct nand_chip *chip)
4871 {
4872         struct mtd_info *mtd = nand_to_mtd(chip);
4873
4874         /* Set the bad block position */
4875         if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4876                 chip->badblockpos = NAND_BBM_POS_LARGE;
4877         else
4878                 chip->badblockpos = NAND_BBM_POS_SMALL;
4879 }
4880
4881 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4882 {
4883         return type->id_len;
4884 }
4885
4886 static bool find_full_id_nand(struct nand_chip *chip,
4887                               struct nand_flash_dev *type)
4888 {
4889         struct nand_device *base = &chip->base;
4890         struct nand_ecc_props requirements;
4891         struct mtd_info *mtd = nand_to_mtd(chip);
4892         struct nand_memory_organization *memorg;
4893         u8 *id_data = chip->id.data;
4894
4895         memorg = nanddev_get_memorg(&chip->base);
4896
4897         if (!strncmp(type->id, id_data, type->id_len)) {
4898                 memorg->pagesize = type->pagesize;
4899                 mtd->writesize = memorg->pagesize;
4900                 memorg->pages_per_eraseblock = type->erasesize /
4901                                                type->pagesize;
4902                 mtd->erasesize = type->erasesize;
4903                 memorg->oobsize = type->oobsize;
4904                 mtd->oobsize = memorg->oobsize;
4905
4906                 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4907                 memorg->eraseblocks_per_lun =
4908                         DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4909                                            memorg->pagesize *
4910                                            memorg->pages_per_eraseblock);
4911                 chip->options |= type->options;
4912                 requirements.strength = NAND_ECC_STRENGTH(type);
4913                 requirements.step_size = NAND_ECC_STEP(type);
4914                 nanddev_set_ecc_requirements(base, &requirements);
4915
4916                 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4917                 if (!chip->parameters.model)
4918                         return false;
4919
4920                 return true;
4921         }
4922         return false;
4923 }
4924
4925 /*
4926  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4927  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4928  * table.
4929  */
4930 static void nand_manufacturer_detect(struct nand_chip *chip)
4931 {
4932         /*
4933          * Try manufacturer detection if available and use
4934          * nand_decode_ext_id() otherwise.
4935          */
4936         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4937             chip->manufacturer.desc->ops->detect) {
4938                 struct nand_memory_organization *memorg;
4939
4940                 memorg = nanddev_get_memorg(&chip->base);
4941
4942                 /* The 3rd id byte holds MLC / multichip data */
4943                 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4944                 chip->manufacturer.desc->ops->detect(chip);
4945         } else {
4946                 nand_decode_ext_id(chip);
4947         }
4948 }
4949
4950 /*
4951  * Manufacturer initialization. This function is called for all NANDs including
4952  * ONFI and JEDEC compliant ones.
4953  * Manufacturer drivers should put all their specific initialization code in
4954  * their ->init() hook.
4955  */
4956 static int nand_manufacturer_init(struct nand_chip *chip)
4957 {
4958         if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4959             !chip->manufacturer.desc->ops->init)
4960                 return 0;
4961
4962         return chip->manufacturer.desc->ops->init(chip);
4963 }
4964
4965 /*
4966  * Manufacturer cleanup. This function is called for all NANDs including
4967  * ONFI and JEDEC compliant ones.
4968  * Manufacturer drivers should put all their specific cleanup code in their
4969  * ->cleanup() hook.
4970  */
4971 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4972 {
4973         /* Release manufacturer private data */
4974         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4975             chip->manufacturer.desc->ops->cleanup)
4976                 chip->manufacturer.desc->ops->cleanup(chip);
4977 }
4978
4979 static const char *
4980 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4981 {
4982         return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4983 }
4984
4985 /*
4986  * Get the flash and manufacturer id and lookup if the type is supported.
4987  */
4988 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4989 {
4990         const struct nand_manufacturer_desc *manufacturer_desc;
4991         struct mtd_info *mtd = nand_to_mtd(chip);
4992         struct nand_memory_organization *memorg;
4993         int busw, ret;
4994         u8 *id_data = chip->id.data;
4995         u8 maf_id, dev_id;
4996         u64 targetsize;
4997
4998         /*
4999          * Let's start by initializing memorg fields that might be left
5000          * unassigned by the ID-based detection logic.
5001          */
5002         memorg = nanddev_get_memorg(&chip->base);
5003         memorg->planes_per_lun = 1;
5004         memorg->luns_per_target = 1;
5005
5006         /*
5007          * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
5008          * after power-up.
5009          */
5010         ret = nand_reset(chip, 0);
5011         if (ret)
5012                 return ret;
5013
5014         /* Select the device */
5015         nand_select_target(chip, 0);
5016
5017         /* Send the command for reading device ID */
5018         ret = nand_readid_op(chip, 0, id_data, 2);
5019         if (ret)
5020                 return ret;
5021
5022         /* Read manufacturer and device IDs */
5023         maf_id = id_data[0];
5024         dev_id = id_data[1];
5025
5026         /*
5027          * Try again to make sure, as some systems the bus-hold or other
5028          * interface concerns can cause random data which looks like a
5029          * possibly credible NAND flash to appear. If the two results do
5030          * not match, ignore the device completely.
5031          */
5032
5033         /* Read entire ID string */
5034         ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
5035         if (ret)
5036                 return ret;
5037
5038         if (id_data[0] != maf_id || id_data[1] != dev_id) {
5039                 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
5040                         maf_id, dev_id, id_data[0], id_data[1]);
5041                 return -ENODEV;
5042         }
5043
5044         chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
5045
5046         /* Try to identify manufacturer */
5047         manufacturer_desc = nand_get_manufacturer_desc(maf_id);
5048         chip->manufacturer.desc = manufacturer_desc;
5049
5050         if (!type)
5051                 type = nand_flash_ids;
5052
5053         /*
5054          * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
5055          * override it.
5056          * This is required to make sure initial NAND bus width set by the
5057          * NAND controller driver is coherent with the real NAND bus width
5058          * (extracted by auto-detection code).
5059          */
5060         busw = chip->options & NAND_BUSWIDTH_16;
5061
5062         /*
5063          * The flag is only set (never cleared), reset it to its default value
5064          * before starting auto-detection.
5065          */
5066         chip->options &= ~NAND_BUSWIDTH_16;
5067
5068         for (; type->name != NULL; type++) {
5069                 if (is_full_id_nand(type)) {
5070                         if (find_full_id_nand(chip, type))
5071                                 goto ident_done;
5072                 } else if (dev_id == type->dev_id) {
5073                         break;
5074                 }
5075         }
5076
5077         if (!type->name || !type->pagesize) {
5078                 /* Check if the chip is ONFI compliant */
5079                 ret = nand_onfi_detect(chip);
5080                 if (ret < 0)
5081                         return ret;
5082                 else if (ret)
5083                         goto ident_done;
5084
5085                 /* Check if the chip is JEDEC compliant */
5086                 ret = nand_jedec_detect(chip);
5087                 if (ret < 0)
5088                         return ret;
5089                 else if (ret)
5090                         goto ident_done;
5091         }
5092
5093         if (!type->name)
5094                 return -ENODEV;
5095
5096         chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
5097         if (!chip->parameters.model)
5098                 return -ENOMEM;
5099
5100         if (!type->pagesize)
5101                 nand_manufacturer_detect(chip);
5102         else
5103                 nand_decode_id(chip, type);
5104
5105         /* Get chip options */
5106         chip->options |= type->options;
5107
5108         memorg->eraseblocks_per_lun =
5109                         DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
5110                                            memorg->pagesize *
5111                                            memorg->pages_per_eraseblock);
5112
5113 ident_done:
5114         if (!mtd->name)
5115                 mtd->name = chip->parameters.model;
5116
5117         if (chip->options & NAND_BUSWIDTH_AUTO) {
5118                 WARN_ON(busw & NAND_BUSWIDTH_16);
5119                 nand_set_defaults(chip);
5120         } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
5121                 /*
5122                  * Check, if buswidth is correct. Hardware drivers should set
5123                  * chip correct!
5124                  */
5125                 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5126                         maf_id, dev_id);
5127                 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
5128                         mtd->name);
5129                 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
5130                         (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
5131                 ret = -EINVAL;
5132
5133                 goto free_detect_allocation;
5134         }
5135
5136         nand_decode_bbm_options(chip);
5137
5138         /* Calculate the address shift from the page size */
5139         chip->page_shift = ffs(mtd->writesize) - 1;
5140         /* Convert chipsize to number of pages per chip -1 */
5141         targetsize = nanddev_target_size(&chip->base);
5142         chip->pagemask = (targetsize >> chip->page_shift) - 1;
5143
5144         chip->bbt_erase_shift = chip->phys_erase_shift =
5145                 ffs(mtd->erasesize) - 1;
5146         if (targetsize & 0xffffffff)
5147                 chip->chip_shift = ffs((unsigned)targetsize) - 1;
5148         else {
5149                 chip->chip_shift = ffs((unsigned)(targetsize >> 32));
5150                 chip->chip_shift += 32 - 1;
5151         }
5152
5153         if (chip->chip_shift - chip->page_shift > 16)
5154                 chip->options |= NAND_ROW_ADDR_3;
5155
5156         chip->badblockbits = 8;
5157
5158         nand_legacy_adjust_cmdfunc(chip);
5159
5160         pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5161                 maf_id, dev_id);
5162         pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
5163                 chip->parameters.model);
5164         pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
5165                 (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
5166                 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
5167         return 0;
5168
5169 free_detect_allocation:
5170         kfree(chip->parameters.model);
5171
5172         return ret;
5173 }
5174
5175 static enum nand_ecc_engine_type
5176 of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
5177 {
5178         enum nand_ecc_legacy_mode {
5179                 NAND_ECC_INVALID,
5180                 NAND_ECC_NONE,
5181                 NAND_ECC_SOFT,
5182                 NAND_ECC_SOFT_BCH,
5183                 NAND_ECC_HW,
5184                 NAND_ECC_HW_SYNDROME,
5185                 NAND_ECC_ON_DIE,
5186         };
5187         const char * const nand_ecc_legacy_modes[] = {
5188                 [NAND_ECC_NONE]         = "none",
5189                 [NAND_ECC_SOFT]         = "soft",
5190                 [NAND_ECC_SOFT_BCH]     = "soft_bch",
5191                 [NAND_ECC_HW]           = "hw",
5192                 [NAND_ECC_HW_SYNDROME]  = "hw_syndrome",
5193                 [NAND_ECC_ON_DIE]       = "on-die",
5194         };
5195         enum nand_ecc_legacy_mode eng_type;
5196         const char *pm;
5197         int err;
5198
5199         err = of_property_read_string(np, "nand-ecc-mode", &pm);
5200         if (err)
5201                 return NAND_ECC_ENGINE_TYPE_INVALID;
5202
5203         for (eng_type = NAND_ECC_NONE;
5204              eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
5205                 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
5206                         switch (eng_type) {
5207                         case NAND_ECC_NONE:
5208                                 return NAND_ECC_ENGINE_TYPE_NONE;
5209                         case NAND_ECC_SOFT:
5210                         case NAND_ECC_SOFT_BCH:
5211                                 return NAND_ECC_ENGINE_TYPE_SOFT;
5212                         case NAND_ECC_HW:
5213                         case NAND_ECC_HW_SYNDROME:
5214                                 return NAND_ECC_ENGINE_TYPE_ON_HOST;
5215                         case NAND_ECC_ON_DIE:
5216                                 return NAND_ECC_ENGINE_TYPE_ON_DIE;
5217                         default:
5218                                 break;
5219                         }
5220                 }
5221         }
5222
5223         return NAND_ECC_ENGINE_TYPE_INVALID;
5224 }
5225
5226 static enum nand_ecc_placement
5227 of_get_rawnand_ecc_placement_legacy(struct device_node *np)
5228 {
5229         const char *pm;
5230         int err;
5231
5232         err = of_property_read_string(np, "nand-ecc-mode", &pm);
5233         if (!err) {
5234                 if (!strcasecmp(pm, "hw_syndrome"))
5235                         return NAND_ECC_PLACEMENT_INTERLEAVED;
5236         }
5237
5238         return NAND_ECC_PLACEMENT_UNKNOWN;
5239 }
5240
5241 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
5242 {
5243         const char *pm;
5244         int err;
5245
5246         err = of_property_read_string(np, "nand-ecc-mode", &pm);
5247         if (!err) {
5248                 if (!strcasecmp(pm, "soft"))
5249                         return NAND_ECC_ALGO_HAMMING;
5250                 else if (!strcasecmp(pm, "soft_bch"))
5251                         return NAND_ECC_ALGO_BCH;
5252         }
5253
5254         return NAND_ECC_ALGO_UNKNOWN;
5255 }
5256
5257 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
5258 {
5259         struct device_node *dn = nand_get_flash_node(chip);
5260         struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
5261
5262         if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5263                 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
5264
5265         if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
5266                 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
5267
5268         if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
5269                 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
5270 }
5271
5272 static int of_get_nand_bus_width(struct nand_chip *chip)
5273 {
5274         struct device_node *dn = nand_get_flash_node(chip);
5275         u32 val;
5276         int ret;
5277
5278         ret = of_property_read_u32(dn, "nand-bus-width", &val);
5279         if (ret == -EINVAL)
5280                 /* Buswidth defaults to 8 if the property does not exist .*/
5281                 return 0;
5282         else if (ret)
5283                 return ret;
5284
5285         if (val == 16)
5286                 chip->options |= NAND_BUSWIDTH_16;
5287         else if (val != 8)
5288                 return -EINVAL;
5289         return 0;
5290 }
5291
5292 static int of_get_nand_secure_regions(struct nand_chip *chip)
5293 {
5294         struct device_node *dn = nand_get_flash_node(chip);
5295         struct property *prop;
5296         int nr_elem, i, j;
5297
5298         /* Only proceed if the "secure-regions" property is present in DT */
5299         prop = of_find_property(dn, "secure-regions", NULL);
5300         if (!prop)
5301                 return 0;
5302
5303         nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
5304         if (nr_elem <= 0)
5305                 return nr_elem;
5306
5307         chip->nr_secure_regions = nr_elem / 2;
5308         chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
5309                                        GFP_KERNEL);
5310         if (!chip->secure_regions)
5311                 return -ENOMEM;
5312
5313         for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
5314                 of_property_read_u64_index(dn, "secure-regions", j,
5315                                            &chip->secure_regions[i].offset);
5316                 of_property_read_u64_index(dn, "secure-regions", j + 1,
5317                                            &chip->secure_regions[i].size);
5318         }
5319
5320         return 0;
5321 }
5322
5323 /**
5324  * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
5325  * @dev: Device that will be parsed. Also used for managed allocations.
5326  * @cs_array: Array of GPIO desc pointers allocated on success
5327  * @ncs_array: Number of entries in @cs_array updated on success.
5328  * @return 0 on success, an error otherwise.
5329  */
5330 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
5331                              unsigned int *ncs_array)
5332 {
5333         struct device_node *np = dev->of_node;
5334         struct gpio_desc **descs;
5335         int ndescs, i;
5336
5337         ndescs = of_gpio_named_count(np, "cs-gpios");
5338         if (ndescs < 0) {
5339                 dev_dbg(dev, "No valid cs-gpios property\n");
5340                 return 0;
5341         }
5342
5343         descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
5344         if (!descs)
5345                 return -ENOMEM;
5346
5347         for (i = 0; i < ndescs; i++) {
5348                 descs[i] = gpiod_get_index_optional(dev, "cs", i,
5349                                                     GPIOD_OUT_HIGH);
5350                 if (IS_ERR(descs[i]))
5351                         return PTR_ERR(descs[i]);
5352         }
5353
5354         *ncs_array = ndescs;
5355         *cs_array = descs;
5356
5357         return 0;
5358 }
5359 EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
5360
/*
 * rawnand_dt_init - Parse the generic raw NAND device-tree properties
 *
 * Reads bus width, boot-medium/BBT flags, and both the new-style and legacy
 * ECC properties from the flash node, then resolves the effective ECC engine
 * type with the precedence: user (DT) choice > controller driver choice >
 * raw NAND default (on-host). Returns 0 on success or a negative error code.
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	/* Without a DT node there is nothing to parse */
	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* New-style properties first, then legacy "nand-ecc-mode" fallbacks */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
5406
5407 /**
5408  * nand_scan_ident - Scan for the NAND device
5409  * @chip: NAND chip object
5410  * @maxchips: number of chips to scan for
5411  * @table: alternative NAND ID table
5412  *
5413  * This is the first phase of the normal nand_scan() function. It reads the
5414  * flash ID and sets up MTD fields accordingly.
5415  *
5416  * This helper used to be called directly from controller drivers that needed
5417  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5418  * prevented dynamic allocations during this phase which was unconvenient and
5419  * as been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5420  */
5421 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5422                            struct nand_flash_dev *table)
5423 {
5424         struct mtd_info *mtd = nand_to_mtd(chip);
5425         struct nand_memory_organization *memorg;
5426         int nand_maf_id, nand_dev_id;
5427         unsigned int i;
5428         int ret;
5429
5430         memorg = nanddev_get_memorg(&chip->base);
5431
5432         /* Assume all dies are deselected when we enter nand_scan_ident(). */
5433         chip->cur_cs = -1;
5434
5435         mutex_init(&chip->lock);
5436         init_waitqueue_head(&chip->resume_wq);
5437
5438         /* Enforce the right timings for reset/detection */
5439         chip->current_interface_config = nand_get_reset_interface_config();
5440
5441         ret = rawnand_dt_init(chip);
5442         if (ret)
5443                 return ret;
5444
5445         if (!mtd->name && mtd->dev.parent)
5446                 mtd->name = dev_name(mtd->dev.parent);
5447
5448         /* Set the default functions */
5449         nand_set_defaults(chip);
5450
5451         ret = nand_legacy_check_hooks(chip);
5452         if (ret)
5453                 return ret;
5454
5455         memorg->ntargets = maxchips;
5456
5457         /* Read the flash type */
5458         ret = nand_detect(chip, table);
5459         if (ret) {
5460                 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5461                         pr_warn("No NAND device found\n");
5462                 nand_deselect_target(chip);
5463                 return ret;
5464         }
5465
5466         nand_maf_id = chip->id.data[0];
5467         nand_dev_id = chip->id.data[1];
5468
5469         nand_deselect_target(chip);
5470
5471         /* Check for a chip array */
5472         for (i = 1; i < maxchips; i++) {
5473                 u8 id[2];
5474
5475                 /* See comment in nand_get_flash_type for reset */
5476                 ret = nand_reset(chip, i);
5477                 if (ret)
5478                         break;
5479
5480                 nand_select_target(chip, i);
5481                 /* Send the command for reading device ID */
5482                 ret = nand_readid_op(chip, 0, id, sizeof(id));
5483                 if (ret)
5484                         break;
5485                 /* Read manufacturer and device IDs */
5486                 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5487                         nand_deselect_target(chip);
5488                         break;
5489                 }
5490                 nand_deselect_target(chip);
5491         }
5492         if (i > 1)
5493                 pr_info("%d chips detected\n", i);
5494
5495         /* Store the number of chips and calc total size for mtd */
5496         memorg->ntargets = i;
5497         mtd->size = i * nanddev_target_size(&chip->base);
5498
5499         return 0;
5500 }
5501
5502 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5503 {
5504         kfree(chip->parameters.model);
5505         kfree(chip->parameters.onfi);
5506 }
5507
5508 int rawnand_sw_hamming_init(struct nand_chip *chip)
5509 {
5510         struct nand_ecc_sw_hamming_conf *engine_conf;
5511         struct nand_device *base = &chip->base;
5512         int ret;
5513
5514         base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5515         base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
5516         base->ecc.user_conf.strength = chip->ecc.strength;
5517         base->ecc.user_conf.step_size = chip->ecc.size;
5518
5519         ret = nand_ecc_sw_hamming_init_ctx(base);
5520         if (ret)
5521                 return ret;
5522
5523         engine_conf = base->ecc.ctx.priv;
5524
5525         if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
5526                 engine_conf->sm_order = true;
5527
5528         chip->ecc.size = base->ecc.ctx.conf.step_size;
5529         chip->ecc.strength = base->ecc.ctx.conf.strength;
5530         chip->ecc.total = base->ecc.ctx.total;
5531         chip->ecc.steps = nanddev_get_ecc_nsteps(base);
5532         chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
5533
5534         return 0;
5535 }
5536 EXPORT_SYMBOL(rawnand_sw_hamming_init);
5537
5538 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
5539                                  const unsigned char *buf,
5540                                  unsigned char *code)
5541 {
5542         struct nand_device *base = &chip->base;
5543
5544         return nand_ecc_sw_hamming_calculate(base, buf, code);
5545 }
5546 EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
5547
5548 int rawnand_sw_hamming_correct(struct nand_chip *chip,
5549                                unsigned char *buf,
5550                                unsigned char *read_ecc,
5551                                unsigned char *calc_ecc)
5552 {
5553         struct nand_device *base = &chip->base;
5554
5555         return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
5556 }
5557 EXPORT_SYMBOL(rawnand_sw_hamming_correct);
5558
5559 void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
5560 {
5561         struct nand_device *base = &chip->base;
5562
5563         nand_ecc_sw_hamming_cleanup_ctx(base);
5564 }
5565 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
5566
5567 int rawnand_sw_bch_init(struct nand_chip *chip)
5568 {
5569         struct nand_device *base = &chip->base;
5570         const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
5571         int ret;
5572
5573         base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5574         base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
5575         base->ecc.user_conf.step_size = chip->ecc.size;
5576         base->ecc.user_conf.strength = chip->ecc.strength;
5577
5578         ret = nand_ecc_sw_bch_init_ctx(base);
5579         if (ret)
5580                 return ret;
5581
5582         chip->ecc.size = ecc_conf->step_size;
5583         chip->ecc.strength = ecc_conf->strength;
5584         chip->ecc.total = base->ecc.ctx.total;
5585         chip->ecc.steps = nanddev_get_ecc_nsteps(base);
5586         chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
5587
5588         return 0;
5589 }
5590 EXPORT_SYMBOL(rawnand_sw_bch_init);
5591
5592 static int rawnand_sw_bch_calculate(struct nand_chip *chip,
5593                                     const unsigned char *buf,
5594                                     unsigned char *code)
5595 {
5596         struct nand_device *base = &chip->base;
5597
5598         return nand_ecc_sw_bch_calculate(base, buf, code);
5599 }
5600
5601 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
5602                            unsigned char *read_ecc, unsigned char *calc_ecc)
5603 {
5604         struct nand_device *base = &chip->base;
5605
5606         return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
5607 }
5608 EXPORT_SYMBOL(rawnand_sw_bch_correct);
5609
5610 void rawnand_sw_bch_cleanup(struct nand_chip *chip)
5611 {
5612         struct nand_device *base = &chip->base;
5613
5614         nand_ecc_sw_bch_cleanup_ctx(base);
5615 }
5616 EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
5617
/*
 * nand_set_ecc_on_host_ops - Install default page/OOB accessors for on-host
 * (controller-side) ECC engines, according to the requested ECC placement.
 *
 * OOB placement shares the generic hwecc helpers and then falls through to
 * the interleaved case for the mandatory-callback sanity check. Returns 0 on
 * success, -EINVAL when the placement is invalid or the controller driver did
 * not supply the callbacks required by the chosen helpers.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The generic hwecc helpers need ->calculate/->correct/->hwctl;
		 * without them the driver must supply its own page accessors.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
5676
/*
 * nand_set_ecc_soft_ops - Install the software ECC (Hamming or BCH) page and
 * OOB accessors and initialize the matching software ECC engine context.
 *
 * Must only be called with ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT.
 * Returns 0 on success, -EINVAL for an unsupported algorithm or missing
 * kernel configuration, or a negative error code from engine initialization.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Hamming: 3 ECC bytes per 256-byte step, 1 bit of strength */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5753
5754 /**
5755  * nand_check_ecc_caps - check the sanity of preset ECC settings
5756  * @chip: nand chip info structure
5757  * @caps: ECC caps info structure
5758  * @oobavail: OOB size that the ECC engine can use
5759  *
5760  * When ECC step size and strength are already set, check if they are supported
5761  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5762  * On success, the calculated ECC bytes is set.
5763  */
5764 static int
5765 nand_check_ecc_caps(struct nand_chip *chip,
5766                     const struct nand_ecc_caps *caps, int oobavail)
5767 {
5768         struct mtd_info *mtd = nand_to_mtd(chip);
5769         const struct nand_ecc_step_info *stepinfo;
5770         int preset_step = chip->ecc.size;
5771         int preset_strength = chip->ecc.strength;
5772         int ecc_bytes, nsteps = mtd->writesize / preset_step;
5773         int i, j;
5774
5775         for (i = 0; i < caps->nstepinfos; i++) {
5776                 stepinfo = &caps->stepinfos[i];
5777
5778                 if (stepinfo->stepsize != preset_step)
5779                         continue;
5780
5781                 for (j = 0; j < stepinfo->nstrengths; j++) {
5782                         if (stepinfo->strengths[j] != preset_strength)
5783                                 continue;
5784
5785                         ecc_bytes = caps->calc_ecc_bytes(preset_step,
5786                                                          preset_strength);
5787                         if (WARN_ON_ONCE(ecc_bytes < 0))
5788                                 return ecc_bytes;
5789
5790                         if (ecc_bytes * nsteps > oobavail) {
5791                                 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5792                                        preset_step, preset_strength);
5793                                 return -ENOSPC;
5794                         }
5795
5796                         chip->ecc.bytes = ecc_bytes;
5797
5798                         return 0;
5799                 }
5800         }
5801
5802         pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5803                preset_step, preset_strength);
5804
5805         return -ENOTSUPP;
5806 }
5807
5808 /**
5809  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5810  * @chip: nand chip info structure
5811  * @caps: ECC engine caps info structure
5812  * @oobavail: OOB size that the ECC engine can use
5813  *
5814  * If a chip's ECC requirement is provided, try to meet it with the least
5815  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5816  * On success, the chosen ECC settings are set.
5817  */
5818 static int
5819 nand_match_ecc_req(struct nand_chip *chip,
5820                    const struct nand_ecc_caps *caps, int oobavail)
5821 {
5822         const struct nand_ecc_props *requirements =
5823                 nanddev_get_ecc_requirements(&chip->base);
5824         struct mtd_info *mtd = nand_to_mtd(chip);
5825         const struct nand_ecc_step_info *stepinfo;
5826         int req_step = requirements->step_size;
5827         int req_strength = requirements->strength;
5828         int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5829         int best_step, best_strength, best_ecc_bytes;
5830         int best_ecc_bytes_total = INT_MAX;
5831         int i, j;
5832
5833         /* No information provided by the NAND chip */
5834         if (!req_step || !req_strength)
5835                 return -ENOTSUPP;
5836
5837         /* number of correctable bits the chip requires in a page */
5838         req_corr = mtd->writesize / req_step * req_strength;
5839
5840         for (i = 0; i < caps->nstepinfos; i++) {
5841                 stepinfo = &caps->stepinfos[i];
5842                 step_size = stepinfo->stepsize;
5843
5844                 for (j = 0; j < stepinfo->nstrengths; j++) {
5845                         strength = stepinfo->strengths[j];
5846
5847                         /*
5848                          * If both step size and strength are smaller than the
5849                          * chip's requirement, it is not easy to compare the
5850                          * resulted reliability.
5851                          */
5852                         if (step_size < req_step && strength < req_strength)
5853                                 continue;
5854
5855                         if (mtd->writesize % step_size)
5856                                 continue;
5857
5858                         nsteps = mtd->writesize / step_size;
5859
5860                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5861                         if (WARN_ON_ONCE(ecc_bytes < 0))
5862                                 continue;
5863                         ecc_bytes_total = ecc_bytes * nsteps;
5864
5865                         if (ecc_bytes_total > oobavail ||
5866                             strength * nsteps < req_corr)
5867                                 continue;
5868
5869                         /*
5870                          * We assume the best is to meet the chip's requrement
5871                          * with the least number of ECC bytes.
5872                          */
5873                         if (ecc_bytes_total < best_ecc_bytes_total) {
5874                                 best_ecc_bytes_total = ecc_bytes_total;
5875                                 best_step = step_size;
5876                                 best_strength = strength;
5877                                 best_ecc_bytes = ecc_bytes;
5878                         }
5879                 }
5880         }
5881
5882         if (best_ecc_bytes_total == INT_MAX)
5883                 return -ENOTSUPP;
5884
5885         chip->ecc.size = best_step;
5886         chip->ecc.strength = best_strength;
5887         chip->ecc.bytes = best_ecc_bytes;
5888
5889         return 0;
5890 }
5891
5892 /**
5893  * nand_maximize_ecc - choose the max ECC strength available
5894  * @chip: nand chip info structure
5895  * @caps: ECC engine caps info structure
5896  * @oobavail: OOB size that the ECC engine can use
5897  *
5898  * Choose the max ECC strength that is supported on the controller, and can fit
5899  * within the chip's OOB.  On success, the chosen ECC settings are set.
5900  */
5901 static int
5902 nand_maximize_ecc(struct nand_chip *chip,
5903                   const struct nand_ecc_caps *caps, int oobavail)
5904 {
5905         struct mtd_info *mtd = nand_to_mtd(chip);
5906         const struct nand_ecc_step_info *stepinfo;
5907         int step_size, strength, nsteps, ecc_bytes, corr;
5908         int best_corr = 0;
5909         int best_step = 0;
5910         int best_strength, best_ecc_bytes;
5911         int i, j;
5912
5913         for (i = 0; i < caps->nstepinfos; i++) {
5914                 stepinfo = &caps->stepinfos[i];
5915                 step_size = stepinfo->stepsize;
5916
5917                 /* If chip->ecc.size is already set, respect it */
5918                 if (chip->ecc.size && step_size != chip->ecc.size)
5919                         continue;
5920
5921                 for (j = 0; j < stepinfo->nstrengths; j++) {
5922                         strength = stepinfo->strengths[j];
5923
5924                         if (mtd->writesize % step_size)
5925                                 continue;
5926
5927                         nsteps = mtd->writesize / step_size;
5928
5929                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5930                         if (WARN_ON_ONCE(ecc_bytes < 0))
5931                                 continue;
5932
5933                         if (ecc_bytes * nsteps > oobavail)
5934                                 continue;
5935
5936                         corr = strength * nsteps;
5937
5938                         /*
5939                          * If the number of correctable bits is the same,
5940                          * bigger step_size has more reliability.
5941                          */
5942                         if (corr > best_corr ||
5943                             (corr == best_corr && step_size > best_step)) {
5944                                 best_corr = corr;
5945                                 best_step = step_size;
5946                                 best_strength = strength;
5947                                 best_ecc_bytes = ecc_bytes;
5948                         }
5949                 }
5950         }
5951
5952         if (!best_corr)
5953                 return -ENOTSUPP;
5954
5955         chip->ecc.size = best_step;
5956         chip->ecc.strength = best_strength;
5957         chip->ecc.bytes = best_ecc_bytes;
5958
5959         return 0;
5960 }
5961
5962 /**
5963  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5964  * @chip: nand chip info structure
5965  * @caps: ECC engine caps info structure
5966  * @oobavail: OOB size that the ECC engine can use
5967  *
5968  * Choose the ECC configuration according to following logic.
5969  *
5970  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5971  *    then check if it is supported by this controller.
5972  * 2. If the user provided the nand-ecc-maximize property, then select maximum
5973  *    ECC strength.
5974  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5975  *    to the chip's requirement. If available OOB size can't fit the chip
5976  *    requirement then fallback to the maximum ECC step size and ECC strength.
5977  *
5978  * On success, the chosen ECC settings are set.
5979  */
5980 int nand_ecc_choose_conf(struct nand_chip *chip,
5981                          const struct nand_ecc_caps *caps, int oobavail)
5982 {
5983         struct mtd_info *mtd = nand_to_mtd(chip);
5984         struct nand_device *nanddev = mtd_to_nanddev(mtd);
5985
5986         if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5987                 return -EINVAL;
5988
5989         if (chip->ecc.size && chip->ecc.strength)
5990                 return nand_check_ecc_caps(chip, caps, oobavail);
5991
5992         if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5993                 return nand_maximize_ecc(chip, caps, oobavail);
5994
5995         if (!nand_match_ecc_req(chip, caps, oobavail))
5996                 return 0;
5997
5998         return nand_maximize_ecc(chip, caps, oobavail);
5999 }
6000 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
6001
6002 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
6003 {
6004         struct nand_chip *chip = container_of(nand, struct nand_chip,
6005                                               base);
6006         unsigned int eb = nanddev_pos_to_row(nand, pos);
6007         int ret;
6008
6009         eb >>= nand->rowconv.eraseblock_addr_shift;
6010
6011         nand_select_target(chip, pos->target);
6012         ret = nand_erase_op(chip, eb);
6013         nand_deselect_target(chip);
6014
6015         return ret;
6016 }
6017
6018 static int rawnand_markbad(struct nand_device *nand,
6019                            const struct nand_pos *pos)
6020 {
6021         struct nand_chip *chip = container_of(nand, struct nand_chip,
6022                                               base);
6023
6024         return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6025 }
6026
6027 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
6028 {
6029         struct nand_chip *chip = container_of(nand, struct nand_chip,
6030                                               base);
6031         int ret;
6032
6033         nand_select_target(chip, pos->target);
6034         ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6035         nand_deselect_target(chip);
6036
6037         return ret;
6038 }
6039
/* Generic NAND framework operations, implemented on top of the raw NAND core. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
6045
/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 *
 * Returns 0 on success, a negative errno otherwise. On error, everything
 * allocated here is released via the goto-cleanup ladder at the bottom.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer large enough for a full page plus its OOB area */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * The software Hamming/BCH engines are excluded because they install
	 * their own OOB layout during nand_set_ecc_soft_ops().
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
				mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		/* ECC step larger than the page: fall back to software ECC */
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* The driver must supply the page accessors for on-die ECC */
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers used by the ECC read/write paths */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Register this chip with the generic NAND framework */
	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	/*
	 * Look for secure regions in the NAND chip. These regions are supposed
	 * to be protected by a secure element like Trustzone. So the read/write
	 * accesses to these regions will be blocked in the runtime by this
	 * driver.
	 */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
6360
6361 static int nand_attach(struct nand_chip *chip)
6362 {
6363         if (chip->controller->ops && chip->controller->ops->attach_chip)
6364                 return chip->controller->ops->attach_chip(chip);
6365
6366         return 0;
6367 }
6368
6369 static void nand_detach(struct nand_chip *chip)
6370 {
6371         if (chip->controller->ops && chip->controller->ops->detach_chip)
6372                 chip->controller->ops->detach_chip(chip);
6373 }
6374
6375 /**
6376  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
6377  * @chip: NAND chip object
6378  * @maxchips: number of chips to scan for.
6379  * @ids: optional flash IDs table
6380  *
6381  * This fills out all the uninitialized function pointers with the defaults.
6382  * The flash ID is read and the mtd/chip structures are filled with the
6383  * appropriate values.
6384  */
6385 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
6386                        struct nand_flash_dev *ids)
6387 {
6388         int ret;
6389
6390         if (!maxchips)
6391                 return -EINVAL;
6392
6393         ret = nand_scan_ident(chip, maxchips, ids);
6394         if (ret)
6395                 return ret;
6396
6397         ret = nand_attach(chip);
6398         if (ret)
6399                 goto cleanup_ident;
6400
6401         ret = nand_scan_tail(chip);
6402         if (ret)
6403                 goto detach_chip;
6404
6405         return 0;
6406
6407 detach_chip:
6408         nand_detach(chip);
6409 cleanup_ident:
6410         nand_scan_ident_cleanup(chip);
6411
6412         return ret;
6413 }
6414 EXPORT_SYMBOL(nand_scan_with_ids);
6415
/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 *
 * Releases everything acquired during nand_scan(), roughly in reverse order
 * of allocation. The ordering below is deliberate; do not reshuffle it.
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Tear down the software ECC engine, if one was instantiated */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
			rawnand_sw_hamming_cleanup(chip);
		else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			rawnand_sw_bch_cleanup(chip);
	}

	nanddev_cleanup(&chip->base);

	/* Free secure regions data */
	kfree(chip->secure_regions);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
6457
6458 EXPORT_SYMBOL_GPL(nand_cleanup);
6459
6460 MODULE_LICENSE("GPL");
6461 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6462 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6463 MODULE_DESCRIPTION("Generic NAND flash driver code");