drivers/mtd/nand/raw/nand_base.c
1 /*
2  *  Overview:
3  *   This is the generic MTD driver for NAND flash devices. It should be
4  *   capable of working with almost all NAND chips currently available.
5  *
6  *      Additional technical information is available on
7  *      http://www.linux-mtd.infradead.org/doc/nand.html
8  *
9  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10  *                2002-2006 Thomas Gleixner (tglx@linutronix.de)
11  *
12  *  Credits:
13  *      David Woodhouse for adding multichip support
14  *
15  *      Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16  *      rework for 2K page size chips
17  *
18  *  TODO:
19  *      Enable cached programming for 2k page size chips
20  *      Check, if mtd->ecctype should be set to MTD_ECC_HW
21  *      if we have HW ECC support.
22  *      BBT table is not serialized, has to be fixed
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License version 2 as
26  * published by the Free Software Foundation.
27  *
28  */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/types.h>
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/nand_ecc.h>
42 #include <linux/mtd/nand_bch.h>
43 #include <linux/interrupt.h>
44 #include <linux/bitops.h>
45 #include <linux/io.h>
46 #include <linux/mtd/partitions.h>
47 #include <linux/of.h>
48 #include <linux/gpio/consumer.h>
49
50 #include "internals.h"
51
52 /* Define default oob placement schemes for large and small page devices */
53 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
54                                  struct mtd_oob_region *oobregion)
55 {
56         struct nand_chip *chip = mtd_to_nand(mtd);
57         struct nand_ecc_ctrl *ecc = &chip->ecc;
58
59         if (section > 1)
60                 return -ERANGE;
61
62         if (!section) {
63                 oobregion->offset = 0;
64                 if (mtd->oobsize == 16)
65                         oobregion->length = 4;
66                 else
67                         oobregion->length = 3;
68         } else {
69                 if (mtd->oobsize == 8)
70                         return -ERANGE;
71
72                 oobregion->offset = 6;
73                 oobregion->length = ecc->total - 4;
74         }
75
76         return 0;
77 }
78
79 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
80                                   struct mtd_oob_region *oobregion)
81 {
82         if (section > 1)
83                 return -ERANGE;
84
85         if (mtd->oobsize == 16) {
86                 if (section)
87                         return -ERANGE;
88
89                 oobregion->length = 8;
90                 oobregion->offset = 8;
91         } else {
92                 oobregion->length = 2;
93                 if (!section)
94                         oobregion->offset = 3;
95                 else
96                         oobregion->offset = 6;
97         }
98
99         return 0;
100 }
101
102 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
103         .ecc = nand_ooblayout_ecc_sp,
104         .free = nand_ooblayout_free_sp,
105 };
106 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
107
108 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
109                                  struct mtd_oob_region *oobregion)
110 {
111         struct nand_chip *chip = mtd_to_nand(mtd);
112         struct nand_ecc_ctrl *ecc = &chip->ecc;
113
114         if (section || !ecc->total)
115                 return -ERANGE;
116
117         oobregion->length = ecc->total;
118         oobregion->offset = mtd->oobsize - oobregion->length;
119
120         return 0;
121 }
122
123 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
124                                   struct mtd_oob_region *oobregion)
125 {
126         struct nand_chip *chip = mtd_to_nand(mtd);
127         struct nand_ecc_ctrl *ecc = &chip->ecc;
128
129         if (section)
130                 return -ERANGE;
131
132         oobregion->length = mtd->oobsize - ecc->total - 2;
133         oobregion->offset = 2;
134
135         return 0;
136 }
137
138 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
139         .ecc = nand_ooblayout_ecc_lp,
140         .free = nand_ooblayout_free_lp,
141 };
142 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
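
/*
 * Usage sketch (illustrative, not part of this driver): a raw NAND controller
 * driver relying on these default layouts can register one of them on the MTD
 * device it exposes, typically from its attach-chip path. The surrounding
 * driver code is assumed; only mtd_set_ooblayout() and the exported ops are
 * real:
 *
 *	struct mtd_info *mtd = nand_to_mtd(chip);
 *
 *	if (mtd->writesize > 512)
 *		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 *	else
 *		mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
 */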
143
144 /*
145  * Support the old "large page" layout used for 1-bit Hamming ECC, where the
146  * ECC bytes are placed at a fixed offset.
147  */
148 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
149                                          struct mtd_oob_region *oobregion)
150 {
151         struct nand_chip *chip = mtd_to_nand(mtd);
152         struct nand_ecc_ctrl *ecc = &chip->ecc;
153
154         if (section)
155                 return -ERANGE;
156
157         switch (mtd->oobsize) {
158         case 64:
159                 oobregion->offset = 40;
160                 break;
161         case 128:
162                 oobregion->offset = 80;
163                 break;
164         default:
165                 return -EINVAL;
166         }
167
168         oobregion->length = ecc->total;
169         if (oobregion->offset + oobregion->length > mtd->oobsize)
170                 return -ERANGE;
171
172         return 0;
173 }
174
175 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
176                                           struct mtd_oob_region *oobregion)
177 {
178         struct nand_chip *chip = mtd_to_nand(mtd);
179         struct nand_ecc_ctrl *ecc = &chip->ecc;
180         int ecc_offset = 0;
181
182         if (section < 0 || section > 1)
183                 return -ERANGE;
184
185         switch (mtd->oobsize) {
186         case 64:
187                 ecc_offset = 40;
188                 break;
189         case 128:
190                 ecc_offset = 80;
191                 break;
192         default:
193                 return -EINVAL;
194         }
195
196         if (section == 0) {
197                 oobregion->offset = 2;
198                 oobregion->length = ecc_offset - 2;
199         } else {
200                 oobregion->offset = ecc_offset + ecc->total;
201                 oobregion->length = mtd->oobsize - oobregion->offset;
202         }
203
204         return 0;
205 }
206
207 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
208         .ecc = nand_ooblayout_ecc_lp_hamming,
209         .free = nand_ooblayout_free_lp_hamming,
210 };
211
212 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
213 {
214         int ret = 0;
215
216         /* Start address must align on block boundary */
217         if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218                 pr_debug("%s: unaligned address\n", __func__);
219                 ret = -EINVAL;
220         }
221
222         /* Length must align on block boundary */
223         if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224                 pr_debug("%s: length not block aligned\n", __func__);
225                 ret = -EINVAL;
226         }
227
228         return ret;
229 }
230
231 /**
232  * nand_select_target() - Select a NAND target (A.K.A. die)
233  * @chip: NAND chip object
234  * @cs: the CS line to select. Note that this CS id is always from the chip
235  *      PoV, not the controller one
236  *
237  * Select a NAND target so that further operations executed on @chip go to the
238  * selected NAND target.
239  */
240 void nand_select_target(struct nand_chip *chip, unsigned int cs)
241 {
242         /*
243          * cs should always lie between 0 and chip->numchips; when that's not
244          * the case it's a bug and the caller should be fixed.
245          */
246         if (WARN_ON(cs > chip->numchips))
247                 return;
248
249         chip->cur_cs = cs;
250
251         if (chip->legacy.select_chip)
252                 chip->legacy.select_chip(chip, cs);
253 }
254 EXPORT_SYMBOL_GPL(nand_select_target);
255
256 /**
257  * nand_deselect_target() - Deselect the currently selected target
258  * @chip: NAND chip object
259  *
260  * Deselect the currently selected NAND target. The result of operations
261  * executed on @chip after the target has been deselected is undefined.
262  */
263 void nand_deselect_target(struct nand_chip *chip)
264 {
265         if (chip->legacy.select_chip)
266                 chip->legacy.select_chip(chip, -1);
267
268         chip->cur_cs = -1;
269 }
270 EXPORT_SYMBOL_GPL(nand_deselect_target);
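
/*
 * Usage sketch (illustrative): accesses to the chip are normally bracketed by
 * a select/deselect pair, as done throughout this file. do_some_chip_access()
 * is a hypothetical placeholder; only the select/deselect helpers are real:
 *
 *	nand_select_target(chip, chipnr);
 *	ret = do_some_chip_access(chip);
 *	nand_deselect_target(chip);
 */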
271
272 /**
273  * nand_release_device - [GENERIC] release chip
274  * @chip: NAND chip object
275  *
276  * Release chip lock and wake up anyone waiting on the device.
277  */
278 static void nand_release_device(struct nand_chip *chip)
279 {
280         /* Release the controller and the chip */
281         spin_lock(&chip->controller->lock);
282         chip->controller->active = NULL;
283         chip->state = FL_READY;
284         wake_up(&chip->controller->wq);
285         spin_unlock(&chip->controller->lock);
286 }
287
288 /**
289  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
290  * @chip: NAND chip object
291  * @ofs: offset from device start
292  *
293  * Check if the block is bad.
294  */
295 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
296 {
297         struct mtd_info *mtd = nand_to_mtd(chip);
298         int page, page_end, res;
299         u8 bad;
300
301         if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
302                 ofs += mtd->erasesize - mtd->writesize;
303
304         page = (int)(ofs >> chip->page_shift) & chip->pagemask;
305         page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
306
307         for (; page < page_end; page++) {
308                 res = chip->ecc.read_oob(chip, page);
309                 if (res < 0)
310                         return res;
311
312                 bad = chip->oob_poi[chip->badblockpos];
313
314                 if (likely(chip->badblockbits == 8))
315                         res = bad != 0xFF;
316                 else
317                         res = hweight8(bad) < chip->badblockbits;
318                 if (res)
319                         return res;
320         }
321
322         return 0;
323 }
324
325 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
326 {
327         if (chip->legacy.block_bad)
328                 return chip->legacy.block_bad(chip, ofs);
329
330         return nand_block_bad(chip, ofs);
331 }
332
333 /**
334  * panic_nand_get_device - [GENERIC] Get chip for selected access
335  * @chip: the nand chip descriptor
336  * @new_state: the state which is requested
337  *
338  * Used when in panic; no locks are taken.
339  */
340 static void panic_nand_get_device(struct nand_chip *chip, int new_state)
341 {
342         /* Hardware controller shared among independent devices */
343         chip->controller->active = chip;
344         chip->state = new_state;
345 }
346
347 /**
348  * nand_get_device - [GENERIC] Get chip for selected access
349  * @chip: NAND chip structure
350  * @new_state: the state which is requested
351  *
352  * Get the device and lock it for exclusive access
353  */
354 static int
355 nand_get_device(struct nand_chip *chip, int new_state)
356 {
357         spinlock_t *lock = &chip->controller->lock;
358         wait_queue_head_t *wq = &chip->controller->wq;
359         DECLARE_WAITQUEUE(wait, current);
360 retry:
361         spin_lock(lock);
362
363         /* Hardware controller shared among independent devices */
364         if (!chip->controller->active)
365                 chip->controller->active = chip;
366
367         if (chip->controller->active == chip && chip->state == FL_READY) {
368                 chip->state = new_state;
369                 spin_unlock(lock);
370                 return 0;
371         }
372         if (new_state == FL_PM_SUSPENDED) {
373                 if (chip->controller->active->state == FL_PM_SUSPENDED) {
374                         chip->state = FL_PM_SUSPENDED;
375                         spin_unlock(lock);
376                         return 0;
377                 }
378         }
379         set_current_state(TASK_UNINTERRUPTIBLE);
380         add_wait_queue(wq, &wait);
381         spin_unlock(lock);
382         schedule();
383         remove_wait_queue(wq, &wait);
384         goto retry;
385 }
386
387 /**
388  * nand_check_wp - [GENERIC] check if the chip is write protected
389  * @chip: NAND chip object
390  *
391  * Check if the device is write protected. The function expects that the
392  * device is already selected.
393  */
394 static int nand_check_wp(struct nand_chip *chip)
395 {
396         u8 status;
397         int ret;
398
399         /* Broken xD cards report WP despite being writable */
400         if (chip->options & NAND_BROKEN_XD)
401                 return 0;
402
403         /* Check the WP bit */
404         ret = nand_status_op(chip, &status);
405         if (ret)
406                 return ret;
407
408         return status & NAND_STATUS_WP ? 0 : 1;
409 }
410
411 /**
412  * nand_fill_oob - [INTERN] Transfer client buffer to oob
413  * @oob: oob data buffer
414  * @len: oob data write length
415  * @ops: oob ops structure
416  */
417 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
418                               struct mtd_oob_ops *ops)
419 {
420         struct mtd_info *mtd = nand_to_mtd(chip);
421         int ret;
422
423         /*
424          * Initialise to all 0xFF, to avoid the possibility of left over OOB
425          * data from a previous OOB read.
426          */
427         memset(chip->oob_poi, 0xff, mtd->oobsize);
428
429         switch (ops->mode) {
430
431         case MTD_OPS_PLACE_OOB:
432         case MTD_OPS_RAW:
433                 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
434                 return oob + len;
435
436         case MTD_OPS_AUTO_OOB:
437                 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
438                                                   ops->ooboffs, len);
439                 BUG_ON(ret);
440                 return oob + len;
441
442         default:
443                 BUG();
444         }
445         return NULL;
446 }
447
448 /**
449  * nand_do_write_oob - [MTD Interface] NAND write out-of-band
450  * @chip: NAND chip object
451  * @to: offset to write to
452  * @ops: oob operation description structure
453  *
454  * NAND write out-of-band.
455  */
456 static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
457                              struct mtd_oob_ops *ops)
458 {
459         struct mtd_info *mtd = nand_to_mtd(chip);
460         int chipnr, page, status, len;
461
462         pr_debug("%s: to = 0x%08x, len = %i\n",
463                          __func__, (unsigned int)to, (int)ops->ooblen);
464
465         len = mtd_oobavail(mtd, ops);
466
467         /* Do not allow write past end of page */
468         if ((ops->ooboffs + ops->ooblen) > len) {
469                 pr_debug("%s: attempt to write past end of page\n",
470                                 __func__);
471                 return -EINVAL;
472         }
473
474         chipnr = (int)(to >> chip->chip_shift);
475
476         /*
477          * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
478          * of my DiskOnChip 2000 test units) will clear the whole data page too
479          * if we don't do this. I have no clue why, but I seem to have 'fixed'
480          * it in the doc2000 driver in August 1999.  dwmw2.
481          */
482         nand_reset(chip, chipnr);
483
484         nand_select_target(chip, chipnr);
485
486         /* Shift to get page */
487         page = (int)(to >> chip->page_shift);
488
489         /* Check if it is write protected */
490         if (nand_check_wp(chip)) {
491                 nand_deselect_target(chip);
492                 return -EROFS;
493         }
494
495         /* Invalidate the page cache, if we write to the cached page */
496         if (page == chip->pagebuf)
497                 chip->pagebuf = -1;
498
499         nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
500
501         if (ops->mode == MTD_OPS_RAW)
502                 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
503         else
504                 status = chip->ecc.write_oob(chip, page & chip->pagemask);
505
506         nand_deselect_target(chip);
507
508         if (status)
509                 return status;
510
511         ops->oobretlen = ops->ooblen;
512
513         return 0;
514 }
515
516 /**
517  * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
518  * @chip: NAND chip object
519  * @ofs: offset from device start
520  *
521  * This is the default implementation, which can be overridden by a hardware
522  * specific driver. It provides the details for writing a bad block marker to a
523  * block.
524  */
525 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
526 {
527         struct mtd_info *mtd = nand_to_mtd(chip);
528         struct mtd_oob_ops ops;
529         uint8_t buf[2] = { 0, 0 };
530         int ret = 0, res, i = 0;
531
532         memset(&ops, 0, sizeof(ops));
533         ops.oobbuf = buf;
534         ops.ooboffs = chip->badblockpos;
535         if (chip->options & NAND_BUSWIDTH_16) {
536                 ops.ooboffs &= ~0x01;
537                 ops.len = ops.ooblen = 2;
538         } else {
539                 ops.len = ops.ooblen = 1;
540         }
541         ops.mode = MTD_OPS_PLACE_OOB;
542
543         /* Write to first/last page(s) if necessary */
544         if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
545                 ofs += mtd->erasesize - mtd->writesize;
546         do {
547                 res = nand_do_write_oob(chip, ofs, &ops);
548                 if (!ret)
549                         ret = res;
550
551                 i++;
552                 ofs += mtd->writesize;
553         } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
554
555         return ret;
556 }
557
558 /**
559  * nand_markbad_bbm - mark a block by updating the BBM
560  * @chip: NAND chip object
561  * @ofs: offset of the block to mark bad
562  */
563 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
564 {
565         if (chip->legacy.block_markbad)
566                 return chip->legacy.block_markbad(chip, ofs);
567
568         return nand_default_block_markbad(chip, ofs);
569 }
570
571 /**
572  * nand_block_markbad_lowlevel - mark a block bad
573  * @chip: NAND chip object
574  * @ofs: offset from device start
575  *
576  * This function performs the generic NAND bad block marking steps (i.e., bad
577  * block table(s) and/or marker(s)). We only allow the hardware driver to
578  * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
579  *
580  * We try operations in the following order:
581  *
582  *  (1) erase the affected block, to allow OOB marker to be written cleanly
583  *  (2) write bad block marker to OOB area of affected block (unless flag
584  *      NAND_BBT_NO_OOB_BBM is present)
585  *  (3) update the BBT
586  *
587  * Note that we retain the first error encountered in (2) or (3), finish the
588  * procedures, and dump the error in the end.
589  */
590 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
591 {
592         struct mtd_info *mtd = nand_to_mtd(chip);
593         int res, ret = 0;
594
595         if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
596                 struct erase_info einfo;
597
598                 /* Attempt erase before marking OOB */
599                 memset(&einfo, 0, sizeof(einfo));
600                 einfo.addr = ofs;
601                 einfo.len = 1ULL << chip->phys_erase_shift;
602                 nand_erase_nand(chip, &einfo, 0);
603
604                 /* Write bad block marker to OOB */
605                 nand_get_device(chip, FL_WRITING);
606                 ret = nand_markbad_bbm(chip, ofs);
607                 nand_release_device(chip);
608         }
609
610         /* Mark block bad in BBT */
611         if (chip->bbt) {
612                 res = nand_markbad_bbt(chip, ofs);
613                 if (!ret)
614                         ret = res;
615         }
616
617         if (!ret)
618                 mtd->ecc_stats.badblocks++;
619
620         return ret;
621 }
622
623 /**
624  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
625  * @mtd: MTD device structure
626  * @ofs: offset from device start
627  *
628  * Check if the block is marked as reserved.
629  */
630 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
631 {
632         struct nand_chip *chip = mtd_to_nand(mtd);
633
634         if (!chip->bbt)
635                 return 0;
636         /* Return info from the table */
637         return nand_isreserved_bbt(chip, ofs);
638 }
639
640 /**
641  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
642  * @chip: NAND chip object
643  * @ofs: offset from device start
644  * @allowbbt: 1, if it is allowed to access the bbt area
645  *
646  * Check if the block is bad, either by reading the bad block table or by
647  * calling the scan function.
648  */
649 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
650 {
651         /* Return info from the table */
652         if (chip->bbt)
653                 return nand_isbad_bbt(chip, ofs, allowbbt);
654
655         return nand_isbad_bbm(chip, ofs);
656 }
657
658 /**
659  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
660  * @chip: NAND chip structure
661  * @timeout_ms: Timeout in ms
662  *
663  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
664  * If that does not happen within the specified timeout, -ETIMEDOUT is
665  * returned.
666  *
667  * This helper is intended to be used when the controller does not have access
668  * to the NAND R/B pin.
669  *
670  * Be aware that calling this helper from an ->exec_op() implementation means
671  * ->exec_op() must be re-entrant.
672  *
673  * Return 0 if the NAND chip is ready, a negative error otherwise.
674  */
675 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
676 {
677         const struct nand_sdr_timings *timings;
678         u8 status = 0;
679         int ret;
680
681         if (!nand_has_exec_op(chip))
682                 return -ENOTSUPP;
683
684         /* Wait tWB before polling the STATUS reg. */
685         timings = nand_get_sdr_timings(&chip->data_interface);
686         ndelay(PSEC_TO_NSEC(timings->tWB_max));
687
688         ret = nand_status_op(chip, NULL);
689         if (ret)
690                 return ret;
691
692         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
693         do {
694                 ret = nand_read_data_op(chip, &status, sizeof(status), true);
695                 if (ret)
696                         break;
697
698                 if (status & NAND_STATUS_READY)
699                         break;
700
701                 /*
702                  * Typical lowest execution time for a tR on most NANDs is 10us,
703                  * use this as polling delay before doing something smarter (ie.
704                  * deriving a delay from the timeout value, timeout_ms/ratio).
705                  */
706                 udelay(10);
707         } while (time_before(jiffies, timeout_ms));
708
709         /*
710          * We have to exit READ_STATUS mode in order to read real data on the
711          * bus in case the WAITRDY instruction is preceding a DATA_IN
712          * instruction.
713          */
714         nand_exit_status_op(chip);
715
716         if (ret)
717                 return ret;
718
719         return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
720 };
721 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
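
/*
 * Usage sketch (illustrative): a controller whose ->exec_op() cannot sample
 * the R/B pin may fall back to polling the chip when it reaches a wait-ready
 * instruction. The instr->ctx.waitrdy.timeout_ms accessor below assumes the
 * exec_op instruction layout; use whatever timeout your driver has at hand:
 *
 *	case NAND_OP_WAITRDY_INSTR:
 *		ret = nand_soft_waitrdy(chip,
 *					instr->ctx.waitrdy.timeout_ms);
 *		break;
 */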
722
723 /**
724  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
725  * @chip: NAND chip structure
726  * @gpiod: GPIO descriptor of R/B pin
727  * @timeout_ms: Timeout in ms
728  *
729  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
730  * within the specified timeout, -ETIMEDOUT is returned.
731  *
732  * This helper is intended to be used when the controller has access to the
733  * NAND R/B pin over GPIO.
734  *
735  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
736  */
737 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
738                       unsigned long timeout_ms)
739 {
740         /* Wait until R/B pin indicates chip is ready or timeout occurs */
741         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
742         do {
743                 if (gpiod_get_value_cansleep(gpiod))
744                         return 0;
745
746                 cond_resched();
747         } while (time_before(jiffies, timeout_ms));
748
749         return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
750 };
751 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
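
/*
 * Usage sketch (illustrative): a driver with the R/B line wired to a GPIO
 * would typically request the descriptor at probe time and use it whenever it
 * needs to wait for the chip. The "rb" consumer name and the 100ms timeout
 * are assumptions; the actual binding and timeout are controller specific:
 *
 *	gpiod = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
 *	if (IS_ERR(gpiod))
 *		return PTR_ERR(gpiod);
 *	...
 *	ret = nand_gpio_waitrdy(chip, gpiod, 100);
 */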
752
753 /**
754  * panic_nand_wait - [GENERIC] wait until the command is done
755  * @chip: NAND chip structure
756  * @timeo: timeout
757  *
758  * Wait for command done. This is a helper function for nand_wait used when
759  * we are in interrupt context. May happen when in panic and trying to write
760  * an oops through mtdoops.
761  */
762 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
763 {
764         int i;
765         for (i = 0; i < timeo; i++) {
766                 if (chip->legacy.dev_ready) {
767                         if (chip->legacy.dev_ready(chip))
768                                 break;
769                 } else {
770                         int ret;
771                         u8 status;
772
773                         ret = nand_read_data_op(chip, &status, sizeof(status),
774                                                 true);
775                         if (ret)
776                                 return;
777
778                         if (status & NAND_STATUS_READY)
779                                 break;
780                 }
781                 mdelay(1);
782         }
783 }
784
785 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
786 {
787         return (chip->parameters.supports_set_get_features &&
788                 test_bit(addr, chip->parameters.get_feature_list));
789 }
790
791 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
792 {
793         return (chip->parameters.supports_set_get_features &&
794                 test_bit(addr, chip->parameters.set_feature_list));
795 }
796
797 /**
798  * nand_reset_data_interface - Reset data interface and timings
799  * @chip: The NAND chip
800  * @chipnr: Internal die id
801  *
802  * Reset the Data interface and timings to ONFI mode 0.
803  *
804  * Returns 0 for success or negative error code otherwise.
805  */
806 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
807 {
808         int ret;
809
810         if (!nand_has_setup_data_iface(chip))
811                 return 0;
812
813         /*
814          * The ONFI specification says:
815          * "
816          * To transition from NV-DDR or NV-DDR2 to the SDR data
817          * interface, the host shall use the Reset (FFh) command
818          * using SDR timing mode 0. A device in any timing mode is
819          * required to recognize Reset (FFh) command issued in SDR
820          * timing mode 0.
821          * "
822          *
823          * Configure the data interface in SDR mode and set the
824          * timings to timing mode 0.
825          */
826
827         onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
828         ret = chip->controller->ops->setup_data_interface(chip, chipnr,
829                                                         &chip->data_interface);
830         if (ret)
831                 pr_err("Failed to configure data interface to SDR timing mode 0\n");
832
833         return ret;
834 }
835
836 /**
837  * nand_setup_data_interface - Setup the best data interface and timings
838  * @chip: The NAND chip
839  * @chipnr: Internal die id
840  *
841  * Find and configure the best data interface and NAND timings supported by
842  * the chip and the driver.
843  * First tries to retrieve supported timing modes from ONFI information,
844  * and if the NAND chip does not support ONFI, relies on the
845  * ->onfi_timing_mode_default specified in the nand_ids table.
846  *
847  * Returns 0 for success or negative error code otherwise.
848  */
849 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
850 {
851         u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
852                 chip->onfi_timing_mode_default,
853         };
854         int ret;
855
856         if (!nand_has_setup_data_iface(chip))
857                 return 0;
858
859         /* Change the mode on the chip side (if supported by the NAND chip) */
860         if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
861                 nand_select_target(chip, chipnr);
862                 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
863                                         tmode_param);
864                 nand_deselect_target(chip);
865                 if (ret)
866                         return ret;
867         }
868
869         /* Change the mode on the controller side */
870         ret = chip->controller->ops->setup_data_interface(chip, chipnr,
871                                                         &chip->data_interface);
872         if (ret)
873                 return ret;
874
875         /* Check the mode has been accepted by the chip, if supported */
876         if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
877                 return 0;
878
879         memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
880         nand_select_target(chip, chipnr);
881         ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
882                                 tmode_param);
883         nand_deselect_target(chip);
884         if (ret)
885                 goto err_reset_chip;
886
887         if (tmode_param[0] != chip->onfi_timing_mode_default) {
888                 pr_warn("timing mode %d not acknowledged by the NAND chip\n",
889                         chip->onfi_timing_mode_default);
890                 goto err_reset_chip;
891         }
892
893         return 0;
894
895 err_reset_chip:
896         /*
897          * Fallback to mode 0 if the chip explicitly did not ack the chosen
898          * timing mode.
899          */
900         nand_reset_data_interface(chip, chipnr);
901         nand_select_target(chip, chipnr);
902         nand_reset_op(chip);
903         nand_deselect_target(chip);
904
905         return ret;
906 }
907
908 /**
909  * nand_init_data_interface - find the best data interface and timings
910  * @chip: The NAND chip
911  *
912  * Find the best data interface and NAND timings supported by the chip
913  * and the driver.
914  * First tries to retrieve supported timing modes from ONFI information,
915  * and if the NAND chip does not support ONFI, relies on the
916  * ->onfi_timing_mode_default specified in the nand_ids table. After this
917  * function nand_chip->data_interface is initialized with the best timing mode
918  * available.
919  *
920  * Returns 0 for success or negative error code otherwise.
921  */
922 static int nand_init_data_interface(struct nand_chip *chip)
923 {
924         int modes, mode, ret;
925
926         if (!nand_has_setup_data_iface(chip))
927                 return 0;
928
929         /*
930          * First try to identify the best timings from ONFI parameters and
931          * if the NAND does not support ONFI, fallback to the default ONFI
932          * timing mode.
933          */
934         if (chip->parameters.onfi) {
935                 modes = chip->parameters.onfi->async_timing_mode;
936         } else {
937                 if (!chip->onfi_timing_mode_default)
938                         return 0;
939
940                 modes = GENMASK(chip->onfi_timing_mode_default, 0);
941         }
942
943         for (mode = fls(modes) - 1; mode >= 0; mode--) {
944                 ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
945                 if (ret)
946                         continue;
947
948                 /*
949                  * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
950                  * controller supports the requested timings.
951                  */
952                 ret = chip->controller->ops->setup_data_interface(chip,
953                                                  NAND_DATA_IFACE_CHECK_ONLY,
954                                                  &chip->data_interface);
955                 if (!ret) {
956                         chip->onfi_timing_mode_default = mode;
957                         break;
958                 }
959         }
960
961         return 0;
962 }
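
/*
 * Worked example (illustrative): a chip whose ONFI parameter page reports
 * async_timing_mode = 0x1f (modes 0-4) makes the loop above start at mode 4
 * and walk down until the controller accepts one. A non-ONFI chip with
 * ->onfi_timing_mode_default = 3 is treated as supporting modes 0-3 via
 * GENMASK(3, 0).
 */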
963
964 /**
965  * nand_fill_column_cycles - fill the column cycles of an address
966  * @chip: The NAND chip
967  * @addrs: Array of address cycles to fill
968  * @offset_in_page: The offset in the page
969  *
970  * Fills the first or the first two bytes of the @addrs field depending
971  * on the NAND bus width and the page size.
972  *
973  * Returns the number of cycles needed to encode the column, or a negative
974  * error code in case one of the arguments is invalid.
975  */
976 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
977                                    unsigned int offset_in_page)
978 {
979         struct mtd_info *mtd = nand_to_mtd(chip);
980
981         /* Make sure the offset is less than the actual page size. */
982         if (offset_in_page > mtd->writesize + mtd->oobsize)
983                 return -EINVAL;
984
985         /*
986          * On small page NANDs, there's a dedicated command to access the OOB
987          * area, and the column address is relative to the start of the OOB
988          * area, not the start of the page. Adjust the address accordingly.
989          */
990         if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
991                 offset_in_page -= mtd->writesize;
992
993         /*
994          * The offset in page is expressed in bytes. If the NAND bus is 16-bit
995          * wide, it must be divided by 2.
996          */
997         if (chip->options & NAND_BUSWIDTH_16) {
998                 if (WARN_ON(offset_in_page % 2))
999                         return -EINVAL;
1000
1001                 offset_in_page /= 2;
1002         }
1003
1004         addrs[0] = offset_in_page;
1005
1006         /*
1007          * Small page NANDs use 1 cycle for the columns, while large page NANDs
1008          * need 2.
1009          */
1010         if (mtd->writesize <= 512)
1011                 return 1;
1012
1013         addrs[1] = offset_in_page >> 8;
1014
1015         return 2;
1016 }
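
/*
 * Worked example (illustrative): on a 2048+64 byte page with an 8-bit bus,
 * offset_in_page = 0x41a is encoded as addrs[0] = 0x1a and addrs[1] = 0x04,
 * and the function returns 2 column cycles. On a 512-byte page the helper
 * fills only addrs[0] and returns 1.
 */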
1017
1018 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1019                                      unsigned int offset_in_page, void *buf,
1020                                      unsigned int len)
1021 {
1022         struct mtd_info *mtd = nand_to_mtd(chip);
1023         const struct nand_sdr_timings *sdr =
1024                 nand_get_sdr_timings(&chip->data_interface);
1025         u8 addrs[4];
1026         struct nand_op_instr instrs[] = {
1027                 NAND_OP_CMD(NAND_CMD_READ0, 0),
1028                 NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
1029                 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1030                                  PSEC_TO_NSEC(sdr->tRR_min)),
1031                 NAND_OP_DATA_IN(len, buf, 0),
1032         };
1033         struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1034         int ret;
1035
1036         /* Drop the DATA_IN instruction if len is set to 0. */
1037         if (!len)
1038                 op.ninstrs--;
1039
1040         if (offset_in_page >= mtd->writesize)
1041                 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1042         else if (offset_in_page >= 256 &&
1043                  !(chip->options & NAND_BUSWIDTH_16))
1044                 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1045
1046         ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1047         if (ret < 0)
1048                 return ret;
1049
1050         addrs[1] = page;
1051         addrs[2] = page >> 8;
1052
1053         if (chip->options & NAND_ROW_ADDR_3) {
1054                 addrs[3] = page >> 16;
1055                 instrs[1].ctx.addr.naddrs++;
1056         }
1057
1058         return nand_exec_op(chip, &op);
1059 }
1060
1061 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1062                                      unsigned int offset_in_page, void *buf,
1063                                      unsigned int len)
1064 {
1065         const struct nand_sdr_timings *sdr =
1066                 nand_get_sdr_timings(&chip->data_interface);
1067         u8 addrs[5];
1068         struct nand_op_instr instrs[] = {
1069                 NAND_OP_CMD(NAND_CMD_READ0, 0),
1070                 NAND_OP_ADDR(4, addrs, 0),
1071                 NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1072                 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1073                                  PSEC_TO_NSEC(sdr->tRR_min)),
1074                 NAND_OP_DATA_IN(len, buf, 0),
1075         };
1076         struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1077         int ret;
1078
1079         /* Drop the DATA_IN instruction if len is set to 0. */
1080         if (!len)
1081                 op.ninstrs--;
1082
1083         ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1084         if (ret < 0)
1085                 return ret;
1086
1087         addrs[2] = page;
1088         addrs[3] = page >> 8;
1089
1090         if (chip->options & NAND_ROW_ADDR_3) {
1091                 addrs[4] = page >> 16;
1092                 instrs[1].ctx.addr.naddrs++;
1093         }
1094
1095         return nand_exec_op(chip, &op);
1096 }
1097
1098 /**
1099  * nand_read_page_op - Do a READ PAGE operation
1100  * @chip: The NAND chip
1101  * @page: page to read
1102  * @offset_in_page: offset within the page
1103  * @buf: buffer used to store the data
1104  * @len: length of the buffer
1105  *
1106  * This function issues a READ PAGE operation.
1107  * This function does not select/unselect the CS line.
1108  *
1109  * Returns 0 on success, a negative error code otherwise.
1110  */
1111 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1112                       unsigned int offset_in_page, void *buf, unsigned int len)
1113 {
1114         struct mtd_info *mtd = nand_to_mtd(chip);
1115
1116         if (len && !buf)
1117                 return -EINVAL;
1118
1119         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1120                 return -EINVAL;
1121
1122         if (nand_has_exec_op(chip)) {
1123                 if (mtd->writesize > 512)
1124                         return nand_lp_exec_read_page_op(chip, page,
1125                                                          offset_in_page, buf,
1126                                                          len);
1127
1128                 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1129                                                  buf, len);
1130         }
1131
1132         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1133         if (len)
1134                 chip->legacy.read_buf(chip, buf, len);
1135
1136         return 0;
1137 }
1138 EXPORT_SYMBOL_GPL(nand_read_page_op);
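
/*
 * Usage sketch (illustrative): a raw read of the data area of one page, as a
 * driver's raw page-read path might do. databuf is assumed to be a
 * page-sized buffer provided by the caller:
 *
 *	struct mtd_info *mtd = nand_to_mtd(chip);
 *	int ret;
 *
 *	ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
 *	if (ret)
 *		return ret;
 */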
1139
1140 /**
1141  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1142  * @chip: The NAND chip
1143  * @page: parameter page to read
1144  * @buf: buffer used to store the data
1145  * @len: length of the buffer
1146  *
1147  * This function issues a READ PARAMETER PAGE operation.
1148  * This function does not select/unselect the CS line.
1149  *
1150  * Returns 0 on success, a negative error code otherwise.
1151  */
1152 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1153                             unsigned int len)
1154 {
1155         unsigned int i;
1156         u8 *p = buf;
1157
1158         if (len && !buf)
1159                 return -EINVAL;
1160
1161         if (nand_has_exec_op(chip)) {
1162                 const struct nand_sdr_timings *sdr =
1163                         nand_get_sdr_timings(&chip->data_interface);
1164                 struct nand_op_instr instrs[] = {
1165                         NAND_OP_CMD(NAND_CMD_PARAM, 0),
1166                         NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1167                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1168                                          PSEC_TO_NSEC(sdr->tRR_min)),
1169                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1170                 };
1171                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1172
1173                 /* Drop the DATA_IN instruction if len is set to 0. */
1174                 if (!len)
1175                         op.ninstrs--;
1176
1177                 return nand_exec_op(chip, &op);
1178         }
1179
1180         chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1181         for (i = 0; i < len; i++)
1182                 p[i] = chip->legacy.read_byte(chip);
1183
1184         return 0;
1185 }
1186
1187 /**
1188  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1189  * @chip: The NAND chip
1190  * @offset_in_page: offset within the page
1191  * @buf: buffer used to store the data
1192  * @len: length of the buffer
1193  * @force_8bit: force 8-bit bus access
1194  *
1195  * This function issues a CHANGE READ COLUMN operation.
1196  * This function does not select/unselect the CS line.
1197  *
1198  * Returns 0 on success, a negative error code otherwise.
1199  */
1200 int nand_change_read_column_op(struct nand_chip *chip,
1201                                unsigned int offset_in_page, void *buf,
1202                                unsigned int len, bool force_8bit)
1203 {
1204         struct mtd_info *mtd = nand_to_mtd(chip);
1205
1206         if (len && !buf)
1207                 return -EINVAL;
1208
1209         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1210                 return -EINVAL;
1211
1212         /* Small page NANDs do not support column change. */
1213         if (mtd->writesize <= 512)
1214                 return -ENOTSUPP;
1215
1216         if (nand_has_exec_op(chip)) {
1217                 const struct nand_sdr_timings *sdr =
1218                         nand_get_sdr_timings(&chip->data_interface);
1219                 u8 addrs[2] = {};
1220                 struct nand_op_instr instrs[] = {
1221                         NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1222                         NAND_OP_ADDR(2, addrs, 0),
1223                         NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1224                                     PSEC_TO_NSEC(sdr->tCCS_min)),
1225                         NAND_OP_DATA_IN(len, buf, 0),
1226                 };
1227                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1228                 int ret;
1229
1230                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1231                 if (ret < 0)
1232                         return ret;
1233
1234                 /* Drop the DATA_IN instruction if len is set to 0. */
1235                 if (!len)
1236                         op.ninstrs--;
1237
1238                 instrs[3].ctx.data.force_8bit = force_8bit;
1239
1240                 return nand_exec_op(chip, &op);
1241         }
1242
1243         chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1244         if (len)
1245                 chip->legacy.read_buf(chip, buf, len);
1246
1247         return 0;
1248 }
1249 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
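
/*
 * Usage sketch (illustrative): after a READ PAGE that only fetched the data
 * area, the OOB bytes of the same page can be retrieved with a column change
 * instead of a second full read. databuf is assumed to be caller-provided:
 *
 *	ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
 *	if (!ret)
 *		ret = nand_change_read_column_op(chip, mtd->writesize,
 *						 chip->oob_poi, mtd->oobsize,
 *						 false);
 */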
1250
1251 /**
1252  * nand_read_oob_op - Do a READ OOB operation
1253  * @chip: The NAND chip
1254  * @page: page to read
1255  * @offset_in_oob: offset within the OOB area
1256  * @buf: buffer used to store the data
1257  * @len: length of the buffer
1258  *
1259  * This function issues a READ OOB operation.
1260  * This function does not select/unselect the CS line.
1261  *
1262  * Returns 0 on success, a negative error code otherwise.
1263  */
1264 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1265                      unsigned int offset_in_oob, void *buf, unsigned int len)
1266 {
1267         struct mtd_info *mtd = nand_to_mtd(chip);
1268
1269         if (len && !buf)
1270                 return -EINVAL;
1271
1272         if (offset_in_oob + len > mtd->oobsize)
1273                 return -EINVAL;
1274
1275         if (nand_has_exec_op(chip))
1276                 return nand_read_page_op(chip, page,
1277                                          mtd->writesize + offset_in_oob,
1278                                          buf, len);
1279
1280         chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1281         if (len)
1282                 chip->legacy.read_buf(chip, buf, len);
1283
1284         return 0;
1285 }
1286 EXPORT_SYMBOL_GPL(nand_read_oob_op);
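
/*
 * Usage sketch (illustrative): reading the whole OOB area of a page into the
 * per-chip OOB buffer, which is essentially what the default ECC read_oob
 * implementations boil down to:
 *
 *	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
 */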
1287
1288 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1289                                   unsigned int offset_in_page, const void *buf,
1290                                   unsigned int len, bool prog)
1291 {
1292         struct mtd_info *mtd = nand_to_mtd(chip);
1293         const struct nand_sdr_timings *sdr =
1294                 nand_get_sdr_timings(&chip->data_interface);
1295         u8 addrs[5] = {};
1296         struct nand_op_instr instrs[] = {
1297                 /*
1298                  * The first instruction will be dropped if we're dealing
1299                  * with a large page NAND and adjusted if we're dealing
1300                  * with a small page NAND and the page offset is > 255.
1301                  */
1302                 NAND_OP_CMD(NAND_CMD_READ0, 0),
1303                 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1304                 NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1305                 NAND_OP_DATA_OUT(len, buf, 0),
1306                 NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1307                 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1308         };
1309         struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1310         int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1311         int ret;
1312         u8 status;
1313
1314         if (naddrs < 0)
1315                 return naddrs;
1316
1317         addrs[naddrs++] = page;
1318         addrs[naddrs++] = page >> 8;
1319         if (chip->options & NAND_ROW_ADDR_3)
1320                 addrs[naddrs++] = page >> 16;
1321
1322         instrs[2].ctx.addr.naddrs = naddrs;
1323
1324         /* Drop the last two instructions if we're not programming the page. */
1325         if (!prog) {
1326                 op.ninstrs -= 2;
1327                 /* Also drop the DATA_OUT instruction if empty. */
1328                 if (!len)
1329                         op.ninstrs--;
1330         }
1331
1332         if (mtd->writesize <= 512) {
1333                 /*
1334                  * Small pages need some more tweaking: we have to adjust the
1335                  * first instruction depending on the page offset we're trying
1336                  * to access.
1337                  */
1338                 if (offset_in_page >= mtd->writesize)
1339                         instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1340                 else if (offset_in_page >= 256 &&
1341                          !(chip->options & NAND_BUSWIDTH_16))
1342                         instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1343         } else {
1344                 /*
1345                  * Drop the first command if we're dealing with a large page
1346                  * NAND.
1347                  */
1348                 op.instrs++;
1349                 op.ninstrs--;
1350         }
1351
1352         ret = nand_exec_op(chip, &op);
1353         if (!prog || ret)
1354                 return ret;
1355
1356         ret = nand_status_op(chip, &status);
1357         if (ret)
1358                 return ret;
1359
1360         return status;
1361 }
1362
1363 /**
1364  * nand_prog_page_begin_op - starts a PROG PAGE operation
1365  * @chip: The NAND chip
1366  * @page: page to write
1367  * @offset_in_page: offset within the page
1368  * @buf: buffer containing the data to write to the page
1369  * @len: length of the buffer
1370  *
1371  * This function issues the first half of a PROG PAGE operation.
1372  * This function does not select/unselect the CS line.
1373  *
1374  * Returns 0 on success, a negative error code otherwise.
1375  */
1376 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1377                             unsigned int offset_in_page, const void *buf,
1378                             unsigned int len)
1379 {
1380         struct mtd_info *mtd = nand_to_mtd(chip);
1381
1382         if (len && !buf)
1383                 return -EINVAL;
1384
1385         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1386                 return -EINVAL;
1387
1388         if (nand_has_exec_op(chip))
1389                 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1390                                               len, false);
1391
1392         chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1393
1394         if (buf)
1395                 chip->legacy.write_buf(chip, buf, len);
1396
1397         return 0;
1398 }
1399 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1400
1401 /**
1402  * nand_prog_page_end_op - ends a PROG PAGE operation
1403  * @chip: The NAND chip
1404  *
1405  * This function issues the second half of a PROG PAGE operation.
1406  * This function does not select/unselect the CS line.
1407  *
1408  * Returns 0 on success, a negative error code otherwise.
1409  */
1410 int nand_prog_page_end_op(struct nand_chip *chip)
1411 {
1412         int ret;
1413         u8 status;
1414
1415         if (nand_has_exec_op(chip)) {
1416                 const struct nand_sdr_timings *sdr =
1417                         nand_get_sdr_timings(&chip->data_interface);
1418                 struct nand_op_instr instrs[] = {
1419                         NAND_OP_CMD(NAND_CMD_PAGEPROG,
1420                                     PSEC_TO_NSEC(sdr->tWB_max)),
1421                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1422                 };
1423                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1424
1425                 ret = nand_exec_op(chip, &op);
1426                 if (ret)
1427                         return ret;
1428
1429                 ret = nand_status_op(chip, &status);
1430                 if (ret)
1431                         return ret;
1432         } else {
1433                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1434                 ret = chip->legacy.waitfunc(chip);
1435                 if (ret < 0)
1436                         return ret;
1437
1438                 status = ret;
1439         }
1440
1441         if (status & NAND_STATUS_FAIL)
1442                 return -EIO;
1443
1444         return 0;
1445 }
1446 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
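
/*
 * Usage sketch (illustrative): drivers with a hardware ECC engine often split
 * the program operation so the controller can stream data and ECC between the
 * two halves. write_data_and_ecc() is a hypothetical placeholder; only the
 * begin/end helpers are real:
 *
 *	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 *	if (ret)
 *		return ret;
 *	ret = write_data_and_ecc(chip, buf, oob_required);
 *	if (ret)
 *		return ret;
 *	return nand_prog_page_end_op(chip);
 */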
1447
1448 /**
1449  * nand_prog_page_op - Do a full PROG PAGE operation
1450  * @chip: The NAND chip
1451  * @page: page to write
1452  * @offset_in_page: offset within the page
1453  * @buf: buffer containing the data to write to the page
1454  * @len: length of the buffer
1455  *
1456  * This function issues a full PROG PAGE operation.
1457  * This function does not select/unselect the CS line.
1458  *
1459  * Returns 0 on success, a negative error code otherwise.
1460  */
1461 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1462                       unsigned int offset_in_page, const void *buf,
1463                       unsigned int len)
1464 {
1465         struct mtd_info *mtd = nand_to_mtd(chip);
1466         int status;
1467
1468         if (!len || !buf)
1469                 return -EINVAL;
1470
1471         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1472                 return -EINVAL;
1473
1474         if (nand_has_exec_op(chip)) {
1475                 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1476                                                 len, true);
1477         } else {
1478                 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1479                                      page);
1480                 chip->legacy.write_buf(chip, buf, len);
1481                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1482                 status = chip->legacy.waitfunc(chip);
1483         }
1484
1485         if (status & NAND_STATUS_FAIL)
1486                 return -EIO;
1487
1488         return 0;
1489 }
1490 EXPORT_SYMBOL_GPL(nand_prog_page_op);
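
/*
 * Usage sketch (illustrative): a raw, single-shot program of the data area of
 * one page, similar to what a raw write path does when no OOB data needs to
 * be written:
 *
 *	ret = nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
 */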
1491
1492 /**
1493  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1494  * @chip: The NAND chip
1495  * @offset_in_page: offset within the page
1496  * @buf: buffer containing the data to send to the NAND
1497  * @len: length of the buffer
1498  * @force_8bit: force 8-bit bus access
1499  *
1500  * This function issues a CHANGE WRITE COLUMN operation.
1501  * This function does not select/unselect the CS line.
1502  *
1503  * Returns 0 on success, a negative error code otherwise.
1504  */
1505 int nand_change_write_column_op(struct nand_chip *chip,
1506                                 unsigned int offset_in_page,
1507                                 const void *buf, unsigned int len,
1508                                 bool force_8bit)
1509 {
1510         struct mtd_info *mtd = nand_to_mtd(chip);
1511
1512         if (len && !buf)
1513                 return -EINVAL;
1514
1515         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1516                 return -EINVAL;
1517
1518         /* Small page NANDs do not support column change. */
1519         if (mtd->writesize <= 512)
1520                 return -ENOTSUPP;
1521
1522         if (nand_has_exec_op(chip)) {
1523                 const struct nand_sdr_timings *sdr =
1524                         nand_get_sdr_timings(&chip->data_interface);
1525                 u8 addrs[2];
1526                 struct nand_op_instr instrs[] = {
1527                         NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1528                         NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1529                         NAND_OP_DATA_OUT(len, buf, 0),
1530                 };
1531                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1532                 int ret;
1533
1534                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1535                 if (ret < 0)
1536                         return ret;
1537
1538                 instrs[2].ctx.data.force_8bit = force_8bit;
1539
1540                 /* Drop the DATA_OUT instruction if len is set to 0. */
1541                 if (!len)
1542                         op.ninstrs--;
1543
1544                 return nand_exec_op(chip, &op);
1545         }
1546
1547         chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1548         if (len)
1549                 chip->legacy.write_buf(chip, buf, len);
1550
1551         return 0;
1552 }
1553 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1554
1555 /**
1556  * nand_readid_op - Do a READID operation
1557  * @chip: The NAND chip
1558  * @addr: address cycle to pass after the READID command
1559  * @buf: buffer used to store the ID
1560  * @len: length of the buffer
1561  *
1562  * This function sends a READID command and reads back the ID returned by the
1563  * NAND.
1564  * This function does not select/unselect the CS line.
1565  *
1566  * Returns 0 on success, a negative error code otherwise.
1567  */
1568 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1569                    unsigned int len)
1570 {
1571         unsigned int i;
1572         u8 *id = buf;
1573
1574         if (len && !buf)
1575                 return -EINVAL;
1576
1577         if (nand_has_exec_op(chip)) {
1578                 const struct nand_sdr_timings *sdr =
1579                         nand_get_sdr_timings(&chip->data_interface);
1580                 struct nand_op_instr instrs[] = {
1581                         NAND_OP_CMD(NAND_CMD_READID, 0),
1582                         NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1583                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1584                 };
1585                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1586
1587                 /* Drop the DATA_IN instruction if len is set to 0. */
1588                 if (!len)
1589                         op.ninstrs--;
1590
1591                 return nand_exec_op(chip, &op);
1592         }
1593
1594         chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1595
1596         for (i = 0; i < len; i++)
1597                 id[i] = chip->legacy.read_byte(chip);
1598
1599         return 0;
1600 }
1601 EXPORT_SYMBOL_GPL(nand_readid_op);
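
/*
 * Editor's illustrative sketch, not part of this driver: fetch the first two
 * ID bytes (manufacturer and device), which is what the detection code uses
 * to look a chip up in the nand_ids table.
 */
static int example_read_ids(struct nand_chip *chip, u8 *maf_id, u8 *dev_id)
{
        u8 id[2];
        int ret;

        ret = nand_readid_op(chip, 0, id, sizeof(id));
        if (ret)
                return ret;

        *maf_id = id[0];
        *dev_id = id[1];

        return 0;
}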
1602
1603 /**
1604  * nand_status_op - Do a STATUS operation
1605  * @chip: The NAND chip
1606  * @status: out variable to store the NAND status
1607  *
1608  * This function sends a STATUS command and reads back the status returned by
1609  * the NAND.
1610  * This function does not select/unselect the CS line.
1611  *
1612  * Returns 0 on success, a negative error code otherwise.
1613  */
1614 int nand_status_op(struct nand_chip *chip, u8 *status)
1615 {
1616         if (nand_has_exec_op(chip)) {
1617                 const struct nand_sdr_timings *sdr =
1618                         nand_get_sdr_timings(&chip->data_interface);
1619                 struct nand_op_instr instrs[] = {
1620                         NAND_OP_CMD(NAND_CMD_STATUS,
1621                                     PSEC_TO_NSEC(sdr->tADL_min)),
1622                         NAND_OP_8BIT_DATA_IN(1, status, 0),
1623                 };
1624                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1625
1626                 if (!status)
1627                         op.ninstrs--;
1628
1629                 return nand_exec_op(chip, &op);
1630         }
1631
1632         chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1633         if (status)
1634                 *status = chip->legacy.read_byte(chip);
1635
1636         return 0;
1637 }
1638 EXPORT_SYMBOL_GPL(nand_status_op);
1639
1640 /**
1641  * nand_exit_status_op - Exit a STATUS operation
1642  * @chip: The NAND chip
1643  *
1644  * This function sends a READ0 command to cancel the effect of the STATUS
1645  * command and avoid reading only the status until a new read command is sent.
1646  *
1647  * This function does not select/unselect the CS line.
1648  *
1649  * Returns 0 on success, a negative error code otherwise.
1650  */
1651 int nand_exit_status_op(struct nand_chip *chip)
1652 {
1653         if (nand_has_exec_op(chip)) {
1654                 struct nand_op_instr instrs[] = {
1655                         NAND_OP_CMD(NAND_CMD_READ0, 0),
1656                 };
1657                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1658
1659                 return nand_exec_op(chip, &op);
1660         }
1661
1662         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1663
1664         return 0;
1665 }
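
/*
 * Editor's illustrative sketch, not part of this driver: poll the STATUS
 * register until the NAND reports ready, then send READ0 so later data
 * cycles return array data again. A real driver would bound this loop with
 * a timeout.
 */
static int example_poll_status(struct nand_chip *chip)
{
        u8 status;
        int ret;

        do {
                ret = nand_status_op(chip, &status);
                if (ret)
                        return ret;
        } while (!(status & NAND_STATUS_READY));

        ret = nand_exit_status_op(chip);
        if (ret)
                return ret;

        return status & NAND_STATUS_FAIL ? -EIO : 0;
}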
1666
1667 /**
1668  * nand_erase_op - Do an erase operation
1669  * @chip: The NAND chip
1670  * @eraseblock: block to erase
1671  *
1672  * This function sends an ERASE command and waits for the NAND to be ready
1673  * before returning.
1674  * This function does not select/unselect the CS line.
1675  *
1676  * Returns 0 on success, a negative error code otherwise.
1677  */
1678 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1679 {
1680         unsigned int page = eraseblock <<
1681                             (chip->phys_erase_shift - chip->page_shift);
1682         int ret;
1683         u8 status;
1684
1685         if (nand_has_exec_op(chip)) {
1686                 const struct nand_sdr_timings *sdr =
1687                         nand_get_sdr_timings(&chip->data_interface);
1688                 u8 addrs[3] = { page, page >> 8, page >> 16 };
1689                 struct nand_op_instr instrs[] = {
1690                         NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1691                         NAND_OP_ADDR(2, addrs, 0),
1692                         NAND_OP_CMD(NAND_CMD_ERASE2,
1693                                     PSEC_TO_NSEC(sdr->tWB_max)),
1694                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1695                 };
1696                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1697
1698                 if (chip->options & NAND_ROW_ADDR_3)
1699                         instrs[1].ctx.addr.naddrs++;
1700
1701                 ret = nand_exec_op(chip, &op);
1702                 if (ret)
1703                         return ret;
1704
1705                 ret = nand_status_op(chip, &status);
1706                 if (ret)
1707                         return ret;
1708         } else {
1709                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1710                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1711
1712                 ret = chip->legacy.waitfunc(chip);
1713                 if (ret < 0)
1714                         return ret;
1715
1716                 status = ret;
1717         }
1718
1719         if (status & NAND_STATUS_FAIL)
1720                 return -EIO;
1721
1722         return 0;
1723 }
1724 EXPORT_SYMBOL_GPL(nand_erase_op);
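
/*
 * Editor's illustrative sketch, not part of this driver: nand_erase_op()
 * takes an eraseblock index, not a byte offset, so a block-aligned offset
 * must be shifted down by phys_erase_shift first.
 */
static int example_erase_at_offset(struct nand_chip *chip, loff_t offs)
{
        unsigned int eraseblock = offs >> chip->phys_erase_shift;

        return nand_erase_op(chip, eraseblock);
}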
1725
1726 /**
1727  * nand_set_features_op - Do a SET FEATURES operation
1728  * @chip: The NAND chip
1729  * @feature: feature id
1730  * @data: 4 bytes of data
1731  *
1732  * This function sends a SET FEATURES command and waits for the NAND to be
1733  * ready before returning.
1734  * This function does not select/unselect the CS line.
1735  *
1736  * Returns 0 on success, a negative error code otherwise.
1737  */
1738 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1739                                 const void *data)
1740 {
1741         const u8 *params = data;
1742         int i, ret;
1743
1744         if (nand_has_exec_op(chip)) {
1745                 const struct nand_sdr_timings *sdr =
1746                         nand_get_sdr_timings(&chip->data_interface);
1747                 struct nand_op_instr instrs[] = {
1748                         NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1749                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1750                         NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1751                                               PSEC_TO_NSEC(sdr->tWB_max)),
1752                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1753                 };
1754                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1755
1756                 return nand_exec_op(chip, &op);
1757         }
1758
1759         chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1760         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1761                 chip->legacy.write_byte(chip, params[i]);
1762
1763         ret = chip->legacy.waitfunc(chip);
1764         if (ret < 0)
1765                 return ret;
1766
1767         if (ret & NAND_STATUS_FAIL)
1768                 return -EIO;
1769
1770         return 0;
1771 }
1772
1773 /**
1774  * nand_get_features_op - Do a GET FEATURES operation
1775  * @chip: The NAND chip
1776  * @feature: feature id
1777  * @data: 4 bytes of data
1778  *
1779  * This function sends a GET FEATURES command and waits for the NAND to be
1780  * ready before returning.
1781  * This function does not select/unselect the CS line.
1782  *
1783  * Returns 0 on success, a negative error code otherwise.
1784  */
1785 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1786                                 void *data)
1787 {
1788         u8 *params = data;
1789         int i;
1790
1791         if (nand_has_exec_op(chip)) {
1792                 const struct nand_sdr_timings *sdr =
1793                         nand_get_sdr_timings(&chip->data_interface);
1794                 struct nand_op_instr instrs[] = {
1795                         NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1796                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
1797                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
1798                                          PSEC_TO_NSEC(sdr->tRR_min)),
1799                         NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1800                                              data, 0),
1801                 };
1802                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1803
1804                 return nand_exec_op(chip, &op);
1805         }
1806
1807         chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1808         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1809                 params[i] = chip->legacy.read_byte(chip);
1810
1811         return 0;
1812 }
1813
1814 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1815                             unsigned int delay_ns)
1816 {
1817         if (nand_has_exec_op(chip)) {
1818                 struct nand_op_instr instrs[] = {
1819                         NAND_OP_WAIT_RDY(timeout_ms,
1820                                          delay_ns),
1821                 };
1822                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1823
1824                 return nand_exec_op(chip, &op);
1825         }
1826
1827         /* Apply delay or wait for ready/busy pin */
1828         if (!chip->legacy.dev_ready)
1829                 udelay(chip->legacy.chip_delay);
1830         else
1831                 nand_wait_ready(chip);
1832
1833         return 0;
1834 }
1835
1836 /**
1837  * nand_reset_op - Do a reset operation
1838  * @chip: The NAND chip
1839  *
1840  * This function sends a RESET command and waits for the NAND to be ready
1841  * before returning.
1842  * This function does not select/unselect the CS line.
1843  *
1844  * Returns 0 on success, a negative error code otherwise.
1845  */
1846 int nand_reset_op(struct nand_chip *chip)
1847 {
1848         if (nand_has_exec_op(chip)) {
1849                 const struct nand_sdr_timings *sdr =
1850                         nand_get_sdr_timings(&chip->data_interface);
1851                 struct nand_op_instr instrs[] = {
1852                         NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
1853                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
1854                 };
1855                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1856
1857                 return nand_exec_op(chip, &op);
1858         }
1859
1860         chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
1861
1862         return 0;
1863 }
1864 EXPORT_SYMBOL_GPL(nand_reset_op);
1865
1866 /**
1867  * nand_read_data_op - Read data from the NAND
1868  * @chip: The NAND chip
1869  * @buf: buffer used to store the data
1870  * @len: length of the buffer
1871  * @force_8bit: force 8-bit bus access
1872  *
1873  * This function does a raw data read on the bus. Usually used after launching
1874  * another NAND operation like nand_read_page_op().
1875  * This function does not select/unselect the CS line.
1876  *
1877  * Returns 0 on success, a negative error code otherwise.
1878  */
1879 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1880                       bool force_8bit)
1881 {
1882         if (!len || !buf)
1883                 return -EINVAL;
1884
1885         if (nand_has_exec_op(chip)) {
1886                 struct nand_op_instr instrs[] = {
1887                         NAND_OP_DATA_IN(len, buf, 0),
1888                 };
1889                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1890
1891                 instrs[0].ctx.data.force_8bit = force_8bit;
1892
1893                 return nand_exec_op(chip, &op);
1894         }
1895
1896         if (force_8bit) {
1897                 u8 *p = buf;
1898                 unsigned int i;
1899
1900                 for (i = 0; i < len; i++)
1901                         p[i] = chip->legacy.read_byte(chip);
1902         } else {
1903                 chip->legacy.read_buf(chip, buf, len);
1904         }
1905
1906         return 0;
1907 }
1908 EXPORT_SYMBOL_GPL(nand_read_data_op);
1909
1910 /**
1911  * nand_write_data_op - Write data to the NAND
1912  * @chip: The NAND chip
1913  * @buf: buffer containing the data to send on the bus
1914  * @len: length of the buffer
1915  * @force_8bit: force 8-bit bus access
1916  *
1917  * This function does a raw data write on the bus. Usually used after launching
1918  * another NAND operation like nand_prog_page_begin_op().
1919  * This function does not select/unselect the CS line.
1920  *
1921  * Returns 0 on success, a negative error code otherwise.
1922  */
1923 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1924                        unsigned int len, bool force_8bit)
1925 {
1926         if (!len || !buf)
1927                 return -EINVAL;
1928
1929         if (nand_has_exec_op(chip)) {
1930                 struct nand_op_instr instrs[] = {
1931                         NAND_OP_DATA_OUT(len, buf, 0),
1932                 };
1933                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1934
1935                 instrs[0].ctx.data.force_8bit = force_8bit;
1936
1937                 return nand_exec_op(chip, &op);
1938         }
1939
1940         if (force_8bit) {
1941                 const u8 *p = buf;
1942                 unsigned int i;
1943
1944                 for (i = 0; i < len; i++)
1945                         chip->legacy.write_byte(chip, p[i]);
1946         } else {
1947                 chip->legacy.write_buf(chip, buf, len);
1948         }
1949
1950         return 0;
1951 }
1952 EXPORT_SYMBOL_GPL(nand_write_data_op);
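
/*
 * Editor's illustrative sketch, not part of this driver: a raw page + OOB
 * write built from nand_prog_page_begin_op(), nand_write_data_op() and
 * nand_prog_page_end_op(), mirroring what the core raw write path does.
 */
static int example_write_page_and_oob_raw(struct nand_chip *chip,
                                          const u8 *buf, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        /* SEQIN + page address + main area data */
        ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
        if (ret)
                return ret;

        /* The write column is now at the OOB area: send chip->oob_poi */
        ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
        if (ret)
                return ret;

        /* PAGEPROG + wait for completion */
        return nand_prog_page_end_op(chip);
}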
1953
1954 /**
1955  * struct nand_op_parser_ctx - Context used by the parser
1956  * @instrs: array of all the instructions that must be addressed
1957  * @ninstrs: length of the @instrs array
1958  * @subop: Sub-operation to be passed to the NAND controller
1959  *
1960  * This structure is used by the core to split NAND operations into
1961  * sub-operations that can be handled by the NAND controller.
1962  */
1963 struct nand_op_parser_ctx {
1964         const struct nand_op_instr *instrs;
1965         unsigned int ninstrs;
1966         struct nand_subop subop;
1967 };
1968
1969 /**
1970  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1971  * @pat: the parser pattern element that matches @instr
1972  * @instr: pointer to the instruction to check
1973  * @start_offset: this is an in/out parameter. If @instr has already been
1974  *                split, then @start_offset is the offset from which to start
1975  *                (either an address cycle or an offset in the data buffer).
1976  *                Conversely, if the function returns true (ie. instr must be
1977  *                split), this parameter is updated to point to the first
1978  *                data/address cycle that has not been taken care of.
1979  *
1980  * Some NAND controllers are limited and cannot send X address cycles in a
1981  * single operation, or cannot read/write more than Y bytes at a time.
1982  * In this case, split the instruction that does not fit in a single
1983  * controller-operation into two or more chunks.
1984  *
1985  * Returns true if the instruction must be split, false otherwise.
1986  * The @start_offset parameter is also updated to the offset at which the next
1987  * bundle of instructions must start (if an address or a data instruction).
1988  */
1989 static bool
1990 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1991                                 const struct nand_op_instr *instr,
1992                                 unsigned int *start_offset)
1993 {
1994         switch (pat->type) {
1995         case NAND_OP_ADDR_INSTR:
1996                 if (!pat->ctx.addr.maxcycles)
1997                         break;
1998
1999                 if (instr->ctx.addr.naddrs - *start_offset >
2000                     pat->ctx.addr.maxcycles) {
2001                         *start_offset += pat->ctx.addr.maxcycles;
2002                         return true;
2003                 }
2004                 break;
2005
2006         case NAND_OP_DATA_IN_INSTR:
2007         case NAND_OP_DATA_OUT_INSTR:
2008                 if (!pat->ctx.data.maxlen)
2009                         break;
2010
2011                 if (instr->ctx.data.len - *start_offset >
2012                     pat->ctx.data.maxlen) {
2013                         *start_offset += pat->ctx.data.maxlen;
2014                         return true;
2015                 }
2016                 break;
2017
2018         default:
2019                 break;
2020         }
2021
2022         return false;
2023 }
2024
2025 /**
2026  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2027  *                            remaining in the parser context
2028  * @pat: the pattern to test
2029  * @ctx: the parser context structure to match with the pattern @pat
2030  *
2031  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2032  * Returns true if this is the case, false otherwise. When true is returned,
2033  * @ctx->subop is updated with the set of instructions to be passed to the
2034  * controller driver.
2035  */
2036 static bool
2037 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2038                          struct nand_op_parser_ctx *ctx)
2039 {
2040         unsigned int instr_offset = ctx->subop.first_instr_start_off;
2041         const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2042         const struct nand_op_instr *instr = ctx->subop.instrs;
2043         unsigned int i, ninstrs;
2044
2045         for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2046                 /*
2047                  * The pattern instruction does not match the operation
2048                  * instruction. If the instruction is marked optional in the
2049                  * pattern definition, we skip the pattern element and continue
2050                  * to the next one. If the element is mandatory, there's no
2051                  * match and we can return false directly.
2052                  */
2053                 if (instr->type != pat->elems[i].type) {
2054                         if (!pat->elems[i].optional)
2055                                 return false;
2056
2057                         continue;
2058                 }
2059
2060                 /*
2061                  * Now check the pattern element constraints. If the pattern is
2062                  * not able to handle the whole instruction in a single step,
2063                  * we have to split it.
2064                  * The last_instr_end_off value comes back updated to point to
2065                  * the position where we have to split the instruction (the
2066                  * start of the next subop chunk).
2067                  */
2068                 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2069                                                     &instr_offset)) {
2070                         ninstrs++;
2071                         i++;
2072                         break;
2073                 }
2074
2075                 instr++;
2076                 ninstrs++;
2077                 instr_offset = 0;
2078         }
2079
2080         /*
2081          * This can happen if all instructions of a pattern are optional.
2082          * Still, if there's not at least one instruction handled by this
2083          * pattern, this is not a match, and we should try the next one (if
2084          * any).
2085          */
2086         if (!ninstrs)
2087                 return false;
2088
2089         /*
2090          * We had a match on the pattern head, but the pattern may be longer
2091          * than the instructions we're asked to execute. We need to make sure
2092          * there are no mandatory elements in the pattern tail.
2093          */
2094         for (; i < pat->nelems; i++) {
2095                 if (!pat->elems[i].optional)
2096                         return false;
2097         }
2098
2099         /*
2100          * We have a match: update the subop structure accordingly and return
2101          * true.
2102          */
2103         ctx->subop.ninstrs = ninstrs;
2104         ctx->subop.last_instr_end_off = instr_offset;
2105
2106         return true;
2107 }
2108
2109 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2110 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2111 {
2112         const struct nand_op_instr *instr;
2113         char *prefix = "      ";
2114         unsigned int i;
2115
2116         pr_debug("executing subop:\n");
2117
2118         for (i = 0; i < ctx->ninstrs; i++) {
2119                 instr = &ctx->instrs[i];
2120
2121                 if (instr == &ctx->subop.instrs[0])
2122                         prefix = "    ->";
2123
2124                 switch (instr->type) {
2125                 case NAND_OP_CMD_INSTR:
2126                         pr_debug("%sCMD      [0x%02x]\n", prefix,
2127                                  instr->ctx.cmd.opcode);
2128                         break;
2129                 case NAND_OP_ADDR_INSTR:
2130                         pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
2131                                  instr->ctx.addr.naddrs,
2132                                  instr->ctx.addr.naddrs < 64 ?
2133                                  instr->ctx.addr.naddrs : 64,
2134                                  instr->ctx.addr.addrs);
2135                         break;
2136                 case NAND_OP_DATA_IN_INSTR:
2137                         pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
2138                                  instr->ctx.data.len,
2139                                  instr->ctx.data.force_8bit ?
2140                                  ", force 8-bit" : "");
2141                         break;
2142                 case NAND_OP_DATA_OUT_INSTR:
2143                         pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
2144                                  instr->ctx.data.len,
2145                                  instr->ctx.data.force_8bit ?
2146                                  ", force 8-bit" : "");
2147                         break;
2148                 case NAND_OP_WAITRDY_INSTR:
2149                         pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
2150                                  instr->ctx.waitrdy.timeout_ms);
2151                         break;
2152                 }
2153
2154                 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2155                         prefix = "      ";
2156         }
2157 }
2158 #else
2159 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2160 {
2161         /* NOP */
2162 }
2163 #endif
2164
2165 /**
2166  * nand_op_parser_exec_op - exec_op parser
2167  * @chip: the NAND chip
2168  * @parser: patterns description provided by the controller driver
2169  * @op: the NAND operation to address
2170  * @check_only: when true, the function only checks if @op can be handled but
2171  *              does not execute the operation
2172  *
2173  * Helper function designed to ease integration of NAND controller drivers that
2174  * only support a limited set of instruction sequences. The supported sequences
2175  * are described in @parser, and the framework takes care of splitting @op into
2176  * multiple sub-operations (if required) and passes them back to the ->exec()
2177  * callback of the matching pattern if @check_only is set to false.
2178  *
2179  * NAND controller drivers should call this function from their own ->exec_op()
2180  * implementation.
2181  *
2182  * Returns 0 on success, a negative error code otherwise. A failure can be
2183  * caused by an unsupported operation (none of the supported patterns is able
2184  * to handle the requested operation), or by an error returned by the
2185  * matching pattern's ->exec() hook.
2186  */
2187 int nand_op_parser_exec_op(struct nand_chip *chip,
2188                            const struct nand_op_parser *parser,
2189                            const struct nand_operation *op, bool check_only)
2190 {
2191         struct nand_op_parser_ctx ctx = {
2192                 .subop.instrs = op->instrs,
2193                 .instrs = op->instrs,
2194                 .ninstrs = op->ninstrs,
2195         };
2196         unsigned int i;
2197
2198         while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2199                 int ret;
2200
2201                 for (i = 0; i < parser->npatterns; i++) {
2202                         const struct nand_op_parser_pattern *pattern;
2203
2204                         pattern = &parser->patterns[i];
2205                         if (!nand_op_parser_match_pat(pattern, &ctx))
2206                                 continue;
2207
2208                         nand_op_parser_trace(&ctx);
2209
2210                         if (check_only)
2211                                 break;
2212
2213                         ret = pattern->exec(chip, &ctx.subop);
2214                         if (ret)
2215                                 return ret;
2216
2217                         break;
2218                 }
2219
2220                 if (i == parser->npatterns) {
2221                         pr_debug("->exec_op() parser: pattern not found!\n");
2222                         return -ENOTSUPP;
2223                 }
2224
2225                 /*
2226                  * Update the context structure by pointing to the start of the
2227                  * next subop.
2228                  */
2229                 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2230                 if (ctx.subop.last_instr_end_off)
2231                         ctx.subop.instrs -= 1;
2232
2233                 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2234         }
2235
2236         return 0;
2237 }
2238 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
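
/*
 * Editor's illustrative sketch, not part of this driver: a controller driver
 * describing the two instruction sequences it can handle (a read-like and a
 * write-like sequence) and delegating the splitting work to
 * nand_op_parser_exec_op(). The exec hook and the 8-cycle/512-byte limits
 * are hypothetical.
 */
static int example_exec_pattern(struct nand_chip *chip,
                                const struct nand_subop *subop)
{
        /* Issue the CMD/ADDR/DATA/WAITRDY instructions of @subop here */
        return 0;
}

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
        NAND_OP_PARSER_PATTERN(example_exec_pattern,
                               NAND_OP_PARSER_PAT_CMD_ELEM(false),
                               NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
                               NAND_OP_PARSER_PAT_CMD_ELEM(true),
                               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
                               NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)),
        NAND_OP_PARSER_PATTERN(example_exec_pattern,
                               NAND_OP_PARSER_PAT_CMD_ELEM(false),
                               NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
                               NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 512),
                               NAND_OP_PARSER_PAT_CMD_ELEM(true),
                               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)));

static int example_exec_op(struct nand_chip *chip,
                           const struct nand_operation *op, bool check_only)
{
        return nand_op_parser_exec_op(chip, &example_op_parser, op,
                                      check_only);
}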
2239
2240 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2241 {
2242         return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2243                          instr->type == NAND_OP_DATA_OUT_INSTR);
2244 }
2245
2246 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2247                                       unsigned int instr_idx)
2248 {
2249         return subop && instr_idx < subop->ninstrs;
2250 }
2251
2252 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2253                                              unsigned int instr_idx)
2254 {
2255         if (instr_idx)
2256                 return 0;
2257
2258         return subop->first_instr_start_off;
2259 }
2260
2261 /**
2262  * nand_subop_get_addr_start_off - Get the start offset in an address array
2263  * @subop: The entire sub-operation
2264  * @instr_idx: Index of the instruction inside the sub-operation
2265  *
2266  * During driver development, one could be tempted to directly use the
2267  * ->addr.addrs field of address instructions. This is wrong as address
2268  * instructions might be split.
2269  *
2270  * Given an address instruction, returns the offset of the first cycle to issue.
2271  */
2272 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2273                                            unsigned int instr_idx)
2274 {
2275         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2276                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2277                 return 0;
2278
2279         return nand_subop_get_start_off(subop, instr_idx);
2280 }
2281 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2282
2283 /**
2284  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2285  * @subop: The entire sub-operation
2286  * @instr_idx: Index of the instruction inside the sub-operation
2287  *
2288  * During driver development, one could be tempted to directly use the
2289  * ->addr->naddrs field of an address instruction. This is wrong as
2290  * instructions might be split.
2291  *
2292  * Given an address instruction, returns the number of address cycles to issue.
2293  */
2294 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2295                                          unsigned int instr_idx)
2296 {
2297         int start_off, end_off;
2298
2299         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2300                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2301                 return 0;
2302
2303         start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2304
2305         if (instr_idx == subop->ninstrs - 1 &&
2306             subop->last_instr_end_off)
2307                 end_off = subop->last_instr_end_off;
2308         else
2309                 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2310
2311         return end_off - start_off;
2312 }
2313 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2314
2315 /**
2316  * nand_subop_get_data_start_off - Get the start offset in a data array
2317  * @subop: The entire sub-operation
2318  * @instr_idx: Index of the instruction inside the sub-operation
2319  *
2320  * During driver development, one could be tempted to directly use the
2321  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2322  * instructions might be split.
2323  *
2324  * Given a data instruction, returns the offset to start from.
2325  */
2326 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2327                                            unsigned int instr_idx)
2328 {
2329         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2330                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2331                 return 0;
2332
2333         return nand_subop_get_start_off(subop, instr_idx);
2334 }
2335 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2336
2337 /**
2338  * nand_subop_get_data_len - Get the number of bytes to retrieve
2339  * @subop: The entire sub-operation
2340  * @instr_idx: Index of the instruction inside the sub-operation
2341  *
2342  * During driver development, one could be tempted to directly use the
2343  * ->data->len field of a data instruction. This is wrong as data instructions
2344  * might be split.
2345  *
2346  * Returns the length of the chunk of data to send/receive.
2347  */
2348 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2349                                      unsigned int instr_idx)
2350 {
2351         int start_off = 0, end_off;
2352
2353         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2354                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2355                 return 0;
2356
2357         start_off = nand_subop_get_data_start_off(subop, instr_idx);
2358
2359         if (instr_idx == subop->ninstrs - 1 &&
2360             subop->last_instr_end_off)
2361                 end_off = subop->last_instr_end_off;
2362         else
2363                 end_off = subop->instrs[instr_idx].ctx.data.len;
2364
2365         return end_off - start_off;
2366 }
2367 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
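
/*
 * Editor's illustrative sketch, not part of this driver: a pattern ->exec()
 * hook walking a sub-operation with the accessors above instead of reading
 * ->addr.naddrs or ->data.len directly, so that instructions split by the
 * parser are handled correctly. The pr_debug() calls stand in for real
 * controller accesses.
 */
static int example_exec_subop(struct nand_chip *chip,
                              const struct nand_subop *subop)
{
        unsigned int i, off, len;

        for (i = 0; i < subop->ninstrs; i++) {
                const struct nand_op_instr *instr = &subop->instrs[i];

                switch (instr->type) {
                case NAND_OP_CMD_INSTR:
                        pr_debug("CMD 0x%02x\n", instr->ctx.cmd.opcode);
                        break;
                case NAND_OP_ADDR_INSTR:
                        off = nand_subop_get_addr_start_off(subop, i);
                        len = nand_subop_get_num_addr_cyc(subop, i);
                        /* clock out instr->ctx.addr.addrs[off..off + len - 1] */
                        pr_debug("ADDR %u cycles from offset %u\n", len, off);
                        break;
                case NAND_OP_DATA_IN_INSTR:
                        off = nand_subop_get_data_start_off(subop, i);
                        len = nand_subop_get_data_len(subop, i);
                        /* fill instr->ctx.data.buf.in + off with len bytes */
                        pr_debug("DATA_IN %u bytes at offset %u\n", len, off);
                        break;
                case NAND_OP_DATA_OUT_INSTR:
                        off = nand_subop_get_data_start_off(subop, i);
                        len = nand_subop_get_data_len(subop, i);
                        /* send instr->ctx.data.buf.out + off, len bytes */
                        pr_debug("DATA_OUT %u bytes at offset %u\n", len, off);
                        break;
                case NAND_OP_WAITRDY_INSTR:
                        pr_debug("WAITRDY %u ms\n",
                                 instr->ctx.waitrdy.timeout_ms);
                        break;
                }
        }

        return 0;
}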
2368
2369 /**
2370  * nand_reset - Reset and initialize a NAND device
2371  * @chip: The NAND chip
2372  * @chipnr: Internal die id
2373  *
2374  * Save the timings data structure, then apply SDR timings mode 0 (see
2375  * nand_reset_data_interface for details), issue the reset operation, and
2376  * restore the previous timings.
2377  *
2378  * Returns 0 on success, a negative error code otherwise.
2379  */
2380 int nand_reset(struct nand_chip *chip, int chipnr)
2381 {
2382         struct nand_data_interface saved_data_intf = chip->data_interface;
2383         int ret;
2384
2385         ret = nand_reset_data_interface(chip, chipnr);
2386         if (ret)
2387                 return ret;
2388
2389         /*
2390          * The CS line has to be released before we can apply the new NAND
2391          * interface settings, hence this weird nand_select_target()
2392          * nand_deselect_target() dance.
2393          */
2394         nand_select_target(chip, chipnr);
2395         ret = nand_reset_op(chip);
2396         nand_deselect_target(chip);
2397         if (ret)
2398                 return ret;
2399
2400         /*
2401          * nand_reset_data_interface() puts both the NAND chip and the NAND
2402          * controller in timing mode 0. If the default mode for this chip is
2403          * also 0, there is no need to apply the change again. Plus, at probe time,
2404          * nand_setup_data_interface() uses ->set/get_features() which would
2405          * fail anyway as the parameter page is not available yet.
2406          */
2407         if (!chip->onfi_timing_mode_default)
2408                 return 0;
2409
2410         chip->data_interface = saved_data_intf;
2411         ret = nand_setup_data_interface(chip, chipnr);
2412         if (ret)
2413                 return ret;
2414
2415         return 0;
2416 }
2417 EXPORT_SYMBOL_GPL(nand_reset);
2418
2419 /**
2420  * nand_get_features - wrapper to perform a GET_FEATURE
2421  * @chip: NAND chip info structure
2422  * @addr: feature address
2423  * @subfeature_param: the subfeature parameters, a four bytes array
2424  *
2425  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2426  * operation cannot be handled.
2427  */
2428 int nand_get_features(struct nand_chip *chip, int addr,
2429                       u8 *subfeature_param)
2430 {
2431         if (!nand_supports_get_features(chip, addr))
2432                 return -ENOTSUPP;
2433
2434         if (chip->legacy.get_features)
2435                 return chip->legacy.get_features(chip, addr, subfeature_param);
2436
2437         return nand_get_features_op(chip, addr, subfeature_param);
2438 }
2439
2440 /**
2441  * nand_set_features - wrapper to perform a SET_FEATURE
2442  * @chip: NAND chip info structure
2443  * @addr: feature address
2444  * @subfeature_param: the subfeature parameters, a four bytes array
2445  *
2446  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2447  * operation cannot be handled.
2448  */
2449 int nand_set_features(struct nand_chip *chip, int addr,
2450                       u8 *subfeature_param)
2451 {
2452         if (!nand_supports_set_features(chip, addr))
2453                 return -ENOTSUPP;
2454
2455         if (chip->legacy.set_features)
2456                 return chip->legacy.set_features(chip, addr, subfeature_param);
2457
2458         return nand_set_features_op(chip, addr, subfeature_param);
2459 }
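
/*
 * Editor's illustrative sketch, not part of this driver: select an ONFI
 * timing mode through the SET FEATURES wrapper, the same way the core does
 * when applying a new data interface. @mode is assumed to be a timing mode
 * the chip advertises.
 */
static int example_set_timing_mode(struct nand_chip *chip, u8 mode)
{
        u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };

        return nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
                                 tmode_param);
}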
2460
2461 /**
2462  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2463  * @buf: buffer to test
2464  * @len: buffer length
2465  * @bitflips_threshold: maximum number of bitflips
2466  *
2467  * Check if a buffer contains only 0xff, which means the underlying region
2468  * has been erased and is ready to be programmed.
2469  * The bitflips_threshold specifies the maximum number of bitflips before
2470  * considering the region as not erased.
2471  * Note: The logic of this function has been extracted from the memweight
2472  * implementation, except that nand_check_erased_buf exits before testing
2473  * the whole buffer if the number of bitflips exceeds the
2474  * bitflips_threshold value.
2475  *
2476  * Returns a positive number of bitflips less than or equal to
2477  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2478  * threshold.
2479  */
2480 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2481 {
2482         const unsigned char *bitmap = buf;
2483         int bitflips = 0;
2484         int weight;
2485
2486         for (; len && ((uintptr_t)bitmap) % sizeof(long);
2487              len--, bitmap++) {
2488                 weight = hweight8(*bitmap);
2489                 bitflips += BITS_PER_BYTE - weight;
2490                 if (unlikely(bitflips > bitflips_threshold))
2491                         return -EBADMSG;
2492         }
2493
2494         for (; len >= sizeof(long);
2495              len -= sizeof(long), bitmap += sizeof(long)) {
2496                 unsigned long d = *((unsigned long *)bitmap);
2497                 if (d == ~0UL)
2498                         continue;
2499                 weight = hweight_long(d);
2500                 bitflips += BITS_PER_LONG - weight;
2501                 if (unlikely(bitflips > bitflips_threshold))
2502                         return -EBADMSG;
2503         }
2504
2505         for (; len > 0; len--, bitmap++) {
2506                 weight = hweight8(*bitmap);
2507                 bitflips += BITS_PER_BYTE - weight;
2508                 if (unlikely(bitflips > bitflips_threshold))
2509                         return -EBADMSG;
2510         }
2511
2512         return bitflips;
2513 }
2514
2515 /**
2516  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2517  *                               0xff data
2518  * @data: data buffer to test
2519  * @datalen: data length
2520  * @ecc: ECC buffer
2521  * @ecclen: ECC length
2522  * @extraoob: extra OOB buffer
2523  * @extraooblen: extra OOB length
2524  * @bitflips_threshold: maximum number of bitflips
2525  *
2526  * Check if a data buffer and its associated ECC and OOB data contains only
2527  * 0xff pattern, which means the underlying region has been erased and is
2528  * ready to be programmed.
2529  * The bitflips_threshold specifies the maximum number of bitflips before
2530  * considering the region as not erased.
2531  *
2532  * Note:
2533  * 1/ ECC algorithms work on pre-defined block sizes which are usually
2534  *    different from the NAND page size. When fixing bitflips, ECC engines will
2535  *    report the number of errors per chunk, and the NAND core infrastructure
2536  *    expects you to return the maximum number of bitflips for the whole page.
2537  *    This is why you should always use this function on a single chunk and
2538  *    not on the whole page. After checking each chunk you should update your
2539  *    max_bitflips value accordingly.
2540  * 2/ When checking for bitflips in erased pages you should not only check
2541  *    the payload data but also their associated ECC data, because a user might
2542  *    have programmed almost all bits to 1 except a few. In this case, we
2543  *    shouldn't consider the chunk as erased, and checking the ECC bytes
2544  *    catches this case.
2545  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2546  *    data are protected by the ECC engine.
2547  *    It could also be used if you support subpages and want to attach some
2548  *    extra OOB data to an ECC chunk.
2549  *
2550  * Returns a positive number of bitflips less than or equal to
2551  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2552  * threshold. In case of success, the passed buffers are filled with 0xff.
2553  */
2554 int nand_check_erased_ecc_chunk(void *data, int datalen,
2555                                 void *ecc, int ecclen,
2556                                 void *extraoob, int extraooblen,
2557                                 int bitflips_threshold)
2558 {
2559         int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2560
2561         data_bitflips = nand_check_erased_buf(data, datalen,
2562                                               bitflips_threshold);
2563         if (data_bitflips < 0)
2564                 return data_bitflips;
2565
2566         bitflips_threshold -= data_bitflips;
2567
2568         ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2569         if (ecc_bitflips < 0)
2570                 return ecc_bitflips;
2571
2572         bitflips_threshold -= ecc_bitflips;
2573
2574         extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2575                                                   bitflips_threshold);
2576         if (extraoob_bitflips < 0)
2577                 return extraoob_bitflips;
2578
2579         if (data_bitflips)
2580                 memset(data, 0xff, datalen);
2581
2582         if (ecc_bitflips)
2583                 memset(ecc, 0xff, ecclen);
2584
2585         if (extraoob_bitflips)
2586                 memset(extraoob, 0xff, extraooblen);
2587
2588         return data_bitflips + ecc_bitflips + extraoob_bitflips;
2589 }
2590 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2591
2592 /**
2593  * nand_read_page_raw_notsupp - dummy read raw page function
2594  * @chip: nand chip info structure
2595  * @buf: buffer to store read data
2596  * @oob_required: caller requires OOB data read to chip->oob_poi
2597  * @page: page number to read
2598  *
2599  * Returns -ENOTSUPP unconditionally.
2600  */
2601 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2602                                int oob_required, int page)
2603 {
2604         return -ENOTSUPP;
2605 }
2606
2607 /**
2608  * nand_read_page_raw - [INTERN] read raw page data without ecc
2609  * @chip: nand chip info structure
2610  * @buf: buffer to store read data
2611  * @oob_required: caller requires OOB data read to chip->oob_poi
2612  * @page: page number to read
2613  *
2614  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2615  */
2616 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2617                        int page)
2618 {
2619         struct mtd_info *mtd = nand_to_mtd(chip);
2620         int ret;
2621
2622         ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2623         if (ret)
2624                 return ret;
2625
2626         if (oob_required) {
2627                 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2628                                         false);
2629                 if (ret)
2630                         return ret;
2631         }
2632
2633         return 0;
2634 }
2635 EXPORT_SYMBOL(nand_read_page_raw);
2636
2637 /**
2638  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2639  * @chip: nand chip info structure
2640  * @buf: buffer to store read data
2641  * @oob_required: caller requires OOB data read to chip->oob_poi
2642  * @page: page number to read
2643  *
2644  * We need a special oob layout and handling even when OOB isn't used.
2645  */
2646 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2647                                        int oob_required, int page)
2648 {
2649         struct mtd_info *mtd = nand_to_mtd(chip);
2650         int eccsize = chip->ecc.size;
2651         int eccbytes = chip->ecc.bytes;
2652         uint8_t *oob = chip->oob_poi;
2653         int steps, size, ret;
2654
2655         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2656         if (ret)
2657                 return ret;
2658
2659         for (steps = chip->ecc.steps; steps > 0; steps--) {
2660                 ret = nand_read_data_op(chip, buf, eccsize, false);
2661                 if (ret)
2662                         return ret;
2663
2664                 buf += eccsize;
2665
2666                 if (chip->ecc.prepad) {
2667                         ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2668                                                 false);
2669                         if (ret)
2670                                 return ret;
2671
2672                         oob += chip->ecc.prepad;
2673                 }
2674
2675                 ret = nand_read_data_op(chip, oob, eccbytes, false);
2676                 if (ret)
2677                         return ret;
2678
2679                 oob += eccbytes;
2680
2681                 if (chip->ecc.postpad) {
2682                         ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2683                                                 false);
2684                         if (ret)
2685                                 return ret;
2686
2687                         oob += chip->ecc.postpad;
2688                 }
2689         }
2690
2691         size = mtd->oobsize - (oob - chip->oob_poi);
2692         if (size) {
2693                 ret = nand_read_data_op(chip, oob, size, false);
2694                 if (ret)
2695                         return ret;
2696         }
2697
2698         return 0;
2699 }
2700
2701 /**
2702  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2703  * @chip: nand chip info structure
2704  * @buf: buffer to store read data
2705  * @oob_required: caller requires OOB data read to chip->oob_poi
2706  * @page: page number to read
2707  */
2708 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2709                                 int oob_required, int page)
2710 {
2711         struct mtd_info *mtd = nand_to_mtd(chip);
2712         int i, eccsize = chip->ecc.size, ret;
2713         int eccbytes = chip->ecc.bytes;
2714         int eccsteps = chip->ecc.steps;
2715         uint8_t *p = buf;
2716         uint8_t *ecc_calc = chip->ecc.calc_buf;
2717         uint8_t *ecc_code = chip->ecc.code_buf;
2718         unsigned int max_bitflips = 0;
2719
2720         chip->ecc.read_page_raw(chip, buf, 1, page);
2721
2722         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2723                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2724
2725         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2726                                          chip->ecc.total);
2727         if (ret)
2728                 return ret;
2729
2730         eccsteps = chip->ecc.steps;
2731         p = buf;
2732
2733         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2734                 int stat;
2735
2736                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2737                 if (stat < 0) {
2738                         mtd->ecc_stats.failed++;
2739                 } else {
2740                         mtd->ecc_stats.corrected += stat;
2741                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2742                 }
2743         }
2744         return max_bitflips;
2745 }
2746
2747 /**
2748  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2749  * @chip: nand chip info structure
2750  * @data_offs: offset of requested data within the page
2751  * @readlen: data length
2752  * @bufpoi: buffer to store read data
2753  * @page: page number to read
2754  */
2755 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2756                              uint32_t readlen, uint8_t *bufpoi, int page)
2757 {
2758         struct mtd_info *mtd = nand_to_mtd(chip);
2759         int start_step, end_step, num_steps, ret;
2760         uint8_t *p;
2761         int data_col_addr, i, gaps = 0;
2762         int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2763         int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2764         int index, section = 0;
2765         unsigned int max_bitflips = 0;
2766         struct mtd_oob_region oobregion = { };
2767
2768         /* Column address within the page aligned to ECC size (256 bytes) */
2769         start_step = data_offs / chip->ecc.size;
2770         end_step = (data_offs + readlen - 1) / chip->ecc.size;
2771         num_steps = end_step - start_step + 1;
2772         index = start_step * chip->ecc.bytes;
2773
2774         /* Data size aligned to ecc.size */
2775         datafrag_len = num_steps * chip->ecc.size;
2776         eccfrag_len = num_steps * chip->ecc.bytes;
2777
2778         data_col_addr = start_step * chip->ecc.size;
2779         /* The data read may not start at column 0 of the page */
2780         p = bufpoi + data_col_addr;
2781         ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2782         if (ret)
2783                 return ret;
2784
2785         /* Calculate ECC */
2786         for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2787                 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2788
2789         /*
2790          * Performance is better if we fetch the ECC bytes at their positions in
2791          * the OOB layout. Let's make sure that there are no gaps in them first.
2792          */
2793         ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
2794         if (ret)
2795                 return ret;
2796
2797         if (oobregion.length < eccfrag_len)
2798                 gaps = 1;
2799
2800         if (gaps) {
2801                 ret = nand_change_read_column_op(chip, mtd->writesize,
2802                                                  chip->oob_poi, mtd->oobsize,
2803                                                  false);
2804                 if (ret)
2805                         return ret;
2806         } else {
2807                 /*
2808                  * Send the command to read the particular ECC bytes, taking
2809                  * care of buswidth alignment in read_buf.
2810                  */
2811                 aligned_pos = oobregion.offset & ~(busw - 1);
2812                 aligned_len = eccfrag_len;
2813                 if (oobregion.offset & (busw - 1))
2814                         aligned_len++;
2815                 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2816                     (busw - 1))
2817                         aligned_len++;
2818
2819                 ret = nand_change_read_column_op(chip,
2820                                                  mtd->writesize + aligned_pos,
2821                                                  &chip->oob_poi[aligned_pos],
2822                                                  aligned_len, false);
2823                 if (ret)
2824                         return ret;
2825         }
2826
2827         ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
2828                                          chip->oob_poi, index, eccfrag_len);
2829         if (ret)
2830                 return ret;
2831
2832         p = bufpoi + data_col_addr;
2833         for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2834                 int stat;
2835
2836                 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
2837                                          &chip->ecc.calc_buf[i]);
2838                 if (stat == -EBADMSG &&
2839                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2840                         /* check for empty pages with bitflips */
2841                         stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2842                                                 &chip->ecc.code_buf[i],
2843                                                 chip->ecc.bytes,
2844                                                 NULL, 0,
2845                                                 chip->ecc.strength);
2846                 }
2847
2848                 if (stat < 0) {
2849                         mtd->ecc_stats.failed++;
2850                 } else {
2851                         mtd->ecc_stats.corrected += stat;
2852                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2853                 }
2854         }
2855         return max_bitflips;
2856 }
2857
2858 /**
2859  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2860  * @chip: nand chip info structure
2861  * @buf: buffer to store read data
2862  * @oob_required: caller requires OOB data read to chip->oob_poi
2863  * @page: page number to read
2864  *
2865  * Not for syndrome calculating ECC controllers which need a special oob layout.
2866  */
2867 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
2868                                 int oob_required, int page)
2869 {
2870         struct mtd_info *mtd = nand_to_mtd(chip);
2871         int i, eccsize = chip->ecc.size, ret;
2872         int eccbytes = chip->ecc.bytes;
2873         int eccsteps = chip->ecc.steps;
2874         uint8_t *p = buf;
2875         uint8_t *ecc_calc = chip->ecc.calc_buf;
2876         uint8_t *ecc_code = chip->ecc.code_buf;
2877         unsigned int max_bitflips = 0;
2878
2879         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2880         if (ret)
2881                 return ret;
2882
2883         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2884                 chip->ecc.hwctl(chip, NAND_ECC_READ);
2885
2886                 ret = nand_read_data_op(chip, p, eccsize, false);
2887                 if (ret)
2888                         return ret;
2889
2890                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2891         }
2892
2893         ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
2894         if (ret)
2895                 return ret;
2896
2897         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2898                                          chip->ecc.total);
2899         if (ret)
2900                 return ret;
2901
2902         eccsteps = chip->ecc.steps;
2903         p = buf;
2904
2905         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2906                 int stat;
2907
2908                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2909                 if (stat == -EBADMSG &&
2910                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2911                         /* check for empty pages with bitflips */
2912                         stat = nand_check_erased_ecc_chunk(p, eccsize,
2913                                                 &ecc_code[i], eccbytes,
2914                                                 NULL, 0,
2915                                                 chip->ecc.strength);
2916                 }
2917
2918                 if (stat < 0) {
2919                         mtd->ecc_stats.failed++;
2920                 } else {
2921                         mtd->ecc_stats.corrected += stat;
2922                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2923                 }
2924         }
2925         return max_bitflips;
2926 }
2927
2928 /**
2929  * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
2930  * @chip: nand chip info structure
2931  * @buf: buffer to store read data
2932  * @oob_required: caller requires OOB data read to chip->oob_poi
2933  * @page: page number to read
2934  *
2935  * Hardware ECC for large page chips, which requires the OOB to be read first.
2936  * For this ECC mode, the write_page method is re-used from ECC_HW. These
2937  * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support
2938  * with multiple ECC steps, which follows the "infix ECC" scheme and reads/writes
2939  * ECC from the data area, overwriting the NAND manufacturer bad block markings.
2940  */
2941 static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
2942                                           int oob_required, int page)
2943 {
2944         struct mtd_info *mtd = nand_to_mtd(chip);
2945         int i, eccsize = chip->ecc.size, ret;
2946         int eccbytes = chip->ecc.bytes;
2947         int eccsteps = chip->ecc.steps;
2948         uint8_t *p = buf;
2949         uint8_t *ecc_code = chip->ecc.code_buf;
2950         uint8_t *ecc_calc = chip->ecc.calc_buf;
2951         unsigned int max_bitflips = 0;
2952
2953         /* Read the OOB area first */
2954         ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2955         if (ret)
2956                 return ret;
2957
2958         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2959         if (ret)
2960                 return ret;
2961
2962         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2963                                          chip->ecc.total);
2964         if (ret)
2965                 return ret;
2966
2967         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2968                 int stat;
2969
2970                 chip->ecc.hwctl(chip, NAND_ECC_READ);
2971
2972                 ret = nand_read_data_op(chip, p, eccsize, false);
2973                 if (ret)
2974                         return ret;
2975
2976                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2977
2978                 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
2979                 if (stat == -EBADMSG &&
2980                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2981                         /* check for empty pages with bitflips */
2982                         stat = nand_check_erased_ecc_chunk(p, eccsize,
2983                                                 &ecc_code[i], eccbytes,
2984                                                 NULL, 0,
2985                                                 chip->ecc.strength);
2986                 }
2987
2988                 if (stat < 0) {
2989                         mtd->ecc_stats.failed++;
2990                 } else {
2991                         mtd->ecc_stats.corrected += stat;
2992                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2993                 }
2994         }
2995         return max_bitflips;
2996 }
2997
2998 /**
2999  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
3000  * @chip: nand chip info structure
3001  * @buf: buffer to store read data
3002  * @oob_required: caller requires OOB data read to chip->oob_poi
3003  * @page: page number to read
3004  *
3005  * The hw generator calculates the error syndrome automatically. Therefore we
3006  * need a special oob layout and handling.
3007  */
3008 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
3009                                    int oob_required, int page)
3010 {
3011         struct mtd_info *mtd = nand_to_mtd(chip);
3012         int ret, i, eccsize = chip->ecc.size;
3013         int eccbytes = chip->ecc.bytes;
3014         int eccsteps = chip->ecc.steps;
3015         int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3016         uint8_t *p = buf;
3017         uint8_t *oob = chip->oob_poi;
3018         unsigned int max_bitflips = 0;
3019
3020         ret = nand_read_page_op(chip, page, 0, NULL, 0);
3021         if (ret)
3022                 return ret;
3023
3024         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3025                 int stat;
3026
3027                 chip->ecc.hwctl(chip, NAND_ECC_READ);
3028
3029                 ret = nand_read_data_op(chip, p, eccsize, false);
3030                 if (ret)
3031                         return ret;
3032
3033                 if (chip->ecc.prepad) {
3034                         ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3035                                                 false);
3036                         if (ret)
3037                                 return ret;
3038
3039                         oob += chip->ecc.prepad;
3040                 }
3041
3042                 chip->ecc.hwctl(chip, NAND_ECC_READSYN);
3043
3044                 ret = nand_read_data_op(chip, oob, eccbytes, false);
3045                 if (ret)
3046                         return ret;
3047
3048                 stat = chip->ecc.correct(chip, p, oob, NULL);
3049
3050                 oob += eccbytes;
3051
3052                 if (chip->ecc.postpad) {
3053                         ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3054                                                 false);
3055                         if (ret)
3056                                 return ret;
3057
3058                         oob += chip->ecc.postpad;
3059                 }
3060
3061                 if (stat == -EBADMSG &&
3062                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3063                         /* check for empty pages with bitflips */
3064                         stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3065                                                            oob - eccpadbytes,
3066                                                            eccpadbytes,
3067                                                            NULL, 0,
3068                                                            chip->ecc.strength);
3069                 }
3070
3071                 if (stat < 0) {
3072                         mtd->ecc_stats.failed++;
3073                 } else {
3074                         mtd->ecc_stats.corrected += stat;
3075                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
3076                 }
3077         }
3078
3079         /* Calculate remaining oob bytes */
3080         i = mtd->oobsize - (oob - chip->oob_poi);
3081         if (i) {
3082                 ret = nand_read_data_op(chip, oob, i, false);
3083                 if (ret)
3084                         return ret;
3085         }
3086
3087         return max_bitflips;
3088 }
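/*
 * Illustrative byte accounting for the syndrome read loop above, using
 * hypothetical geometry (not taken from this file): a 2048+64 byte page with
 * ecc.size = 512, ecc.prepad = 4, ecc.bytes = 10, ecc.postpad = 2 and four
 * ECC steps. Each iteration reads 512 data bytes followed by 4 + 10 + 2 OOB
 * bytes, so the loop consumes 4 * (512 + 16) = 2112 bytes and the trailing
 * "remaining oob bytes" read transfers 64 - 4 * 16 = 0 bytes.
 */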
3089
3090 /**
3091  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3092  * @chip: NAND chip object
3093  * @oob: oob destination address
3094  * @ops: oob ops structure
3095  * @len: size of oob to transfer
3096  */
3097 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3098                                   struct mtd_oob_ops *ops, size_t len)
3099 {
3100         struct mtd_info *mtd = nand_to_mtd(chip);
3101         int ret;
3102
3103         switch (ops->mode) {
3104
3105         case MTD_OPS_PLACE_OOB:
3106         case MTD_OPS_RAW:
3107                 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3108                 return oob + len;
3109
3110         case MTD_OPS_AUTO_OOB:
3111                 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3112                                                   ops->ooboffs, len);
3113                 BUG_ON(ret);
3114                 return oob + len;
3115
3116         default:
3117                 BUG();
3118         }
3119         return NULL;
3120 }
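/*
 * Illustrative note (hypothetical OOB layout, not taken from this driver):
 * with MTD_OPS_PLACE_OOB or MTD_OPS_RAW the copy above is a flat memcpy()
 * from chip->oob_poi + ops->ooboffs. With MTD_OPS_AUTO_OOB only the "free"
 * bytes described by the mtd_ooblayout are gathered; e.g. if the layout
 * exposes free regions at offsets 2-5 and 8-15, a 6-byte transfer returns
 * raw OOB bytes 2, 3, 4, 5, 8 and 9.
 */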
3121
3122 /**
3123  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3124  * @chip: NAND chip object
3125  * @retry_mode: the retry mode to use
3126  *
3127  * Some vendors supply a special command to shift the Vt threshold, to be used
3128  * when there are too many bitflips in a page (i.e., ECC error). After setting
3129  * a new threshold, the host should retry reading the page.
3130  */
3131 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3132 {
3133         pr_debug("setting READ RETRY mode %d\n", retry_mode);
3134
3135         if (retry_mode >= chip->read_retries)
3136                 return -EINVAL;
3137
3138         if (!chip->setup_read_retry)
3139                 return -EOPNOTSUPP;
3140
3141         return chip->setup_read_retry(chip, retry_mode);
3142 }
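/*
 * Illustrative flow (a sketch of how this helper is driven; see
 * nand_do_read_ops() below): on an uncorrectable read the caller bumps
 * retry_mode and re-reads the page, walking modes 1..chip->read_retries - 1.
 * If every mode still fails, the read is reported as -EBADMSG, and any page
 * that needed a retry is followed by nand_setup_read_retry(chip, 0) to
 * restore the default threshold.
 */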
3143
3144 static void nand_wait_readrdy(struct nand_chip *chip)
3145 {
3146         const struct nand_sdr_timings *sdr;
3147
3148         if (!(chip->options & NAND_NEED_READRDY))
3149                 return;
3150
3151         sdr = nand_get_sdr_timings(&chip->data_interface);
3152         WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3153 }
3154
3155 /**
3156  * nand_do_read_ops - [INTERN] Read data with ECC
3157  * @chip: NAND chip object
3158  * @from: offset to read from
3159  * @ops: oob ops structure
3160  *
3161  * Internal function. Called with chip held.
3162  */
3163 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3164                             struct mtd_oob_ops *ops)
3165 {
3166         int chipnr, page, realpage, col, bytes, aligned, oob_required;
3167         struct mtd_info *mtd = nand_to_mtd(chip);
3168         int ret = 0;
3169         uint32_t readlen = ops->len;
3170         uint32_t oobreadlen = ops->ooblen;
3171         uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3172
3173         uint8_t *bufpoi, *oob, *buf;
3174         int use_bufpoi;
3175         unsigned int max_bitflips = 0;
3176         int retry_mode = 0;
3177         bool ecc_fail = false;
3178
3179         chipnr = (int)(from >> chip->chip_shift);
3180         nand_select_target(chip, chipnr);
3181
3182         realpage = (int)(from >> chip->page_shift);
3183         page = realpage & chip->pagemask;
3184
3185         col = (int)(from & (mtd->writesize - 1));
3186
3187         buf = ops->datbuf;
3188         oob = ops->oobbuf;
3189         oob_required = oob ? 1 : 0;
3190
3191         while (1) {
3192                 unsigned int ecc_failures = mtd->ecc_stats.failed;
3193
3194                 bytes = min(mtd->writesize - col, readlen);
3195                 aligned = (bytes == mtd->writesize);
3196
3197                 if (!aligned)
3198                         use_bufpoi = 1;
3199                 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3200                         use_bufpoi = !virt_addr_valid(buf) ||
3201                                      !IS_ALIGNED((unsigned long)buf,
3202                                                  chip->buf_align);
3203                 else
3204                         use_bufpoi = 0;
3205
3206                 /* Is the current page in the buffer? */
3207                 if (realpage != chip->pagebuf || oob) {
3208                         bufpoi = use_bufpoi ? chip->data_buf : buf;
3209
3210                         if (use_bufpoi && aligned)
3211                                 pr_debug("%s: using read bounce buffer for buf@%p\n",
3212                                                  __func__, buf);
3213
3214 read_retry:
3215                         /*
3216                          * Now read the page into the buffer.  Absent an error,
3217                          * the read methods return max bitflips per ecc step.
3218                          */
3219                         if (unlikely(ops->mode == MTD_OPS_RAW))
3220                                 ret = chip->ecc.read_page_raw(chip, bufpoi,
3221                                                               oob_required,
3222                                                               page);
3223                         else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3224                                  !oob)
3225                                 ret = chip->ecc.read_subpage(chip, col, bytes,
3226                                                              bufpoi, page);
3227                         else
3228                                 ret = chip->ecc.read_page(chip, bufpoi,
3229                                                           oob_required, page);
3230                         if (ret < 0) {
3231                                 if (use_bufpoi)
3232                                         /* Invalidate page cache */
3233                                         chip->pagebuf = -1;
3234                                 break;
3235                         }
3236
3237                         /* Transfer not aligned data */
3238                         if (use_bufpoi) {
3239                                 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3240                                     !(mtd->ecc_stats.failed - ecc_failures) &&
3241                                     (ops->mode != MTD_OPS_RAW)) {
3242                                         chip->pagebuf = realpage;
3243                                         chip->pagebuf_bitflips = ret;
3244                                 } else {
3245                                         /* Invalidate page cache */
3246                                         chip->pagebuf = -1;
3247                                 }
3248                                 memcpy(buf, chip->data_buf + col, bytes);
3249                         }
3250
3251                         if (unlikely(oob)) {
3252                                 int toread = min(oobreadlen, max_oobsize);
3253
3254                                 if (toread) {
3255                                         oob = nand_transfer_oob(chip, oob, ops,
3256                                                                 toread);
3257                                         oobreadlen -= toread;
3258                                 }
3259                         }
3260
3261                         nand_wait_readrdy(chip);
3262
3263                         if (mtd->ecc_stats.failed - ecc_failures) {
3264                                 if (retry_mode + 1 < chip->read_retries) {
3265                                         retry_mode++;
3266                                         ret = nand_setup_read_retry(chip,
3267                                                         retry_mode);
3268                                         if (ret < 0)
3269                                                 break;
3270
3271                                         /* Reset failures; retry */
3272                                         mtd->ecc_stats.failed = ecc_failures;
3273                                         goto read_retry;
3274                                 } else {
3275                                         /* No more retry modes; real failure */
3276                                         ecc_fail = true;
3277                                 }
3278                         }
3279
3280                         buf += bytes;
3281                         max_bitflips = max_t(unsigned int, max_bitflips, ret);
3282                 } else {
3283                         memcpy(buf, chip->data_buf + col, bytes);
3284                         buf += bytes;
3285                         max_bitflips = max_t(unsigned int, max_bitflips,
3286                                              chip->pagebuf_bitflips);
3287                 }
3288
3289                 readlen -= bytes;
3290
3291                 /* Reset to retry mode 0 */
3292                 if (retry_mode) {
3293                         ret = nand_setup_read_retry(chip, 0);
3294                         if (ret < 0)
3295                                 break;
3296                         retry_mode = 0;
3297                 }
3298
3299                 if (!readlen)
3300                         break;
3301
3302                 /* For subsequent reads align to page boundary */
3303                 col = 0;
3304                 /* Increment page address */
3305                 realpage++;
3306
3307                 page = realpage & chip->pagemask;
3308                 /* Check if we cross a chip boundary */
3309                 if (!page) {
3310                         chipnr++;
3311                         nand_deselect_target(chip);
3312                         nand_select_target(chip, chipnr);
3313                 }
3314         }
3315         nand_deselect_target(chip);
3316
3317         ops->retlen = ops->len - (size_t) readlen;
3318         if (oob)
3319                 ops->oobretlen = ops->ooblen - oobreadlen;
3320
3321         if (ret < 0)
3322                 return ret;
3323
3324         if (ecc_fail)
3325                 return -EBADMSG;
3326
3327         return max_bitflips;
3328 }
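/*
 * Caller-side sketch (illustrative only; buffer names are hypothetical):
 * an MTD user typically reaches nand_do_read_ops() via mtd_read_oob() with
 * a populated struct mtd_oob_ops, e.g.:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.datbuf = data_buf,
 *		.len    = mtd->writesize,
 *		.oobbuf = oob_buf,
 *		.ooblen = mtd->oobavail,
 *	};
 *	err = mtd_read_oob(mtd, from, &ops);
 *
 * On return, ops.retlen and ops.oobretlen hold the transferred byte counts.
 */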
3329
3330 /**
3331  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3332  * @chip: nand chip info structure
3333  * @page: page number to read
3334  */
3335 int nand_read_oob_std(struct nand_chip *chip, int page)
3336 {
3337         struct mtd_info *mtd = nand_to_mtd(chip);
3338
3339         return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3340 }
3341 EXPORT_SYMBOL(nand_read_oob_std);
3342
3343 /**
3344  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3345  *                          with syndromes
3346  * @chip: nand chip info structure
3347  * @page: page number to read
3348  */
3349 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3350 {
3351         struct mtd_info *mtd = nand_to_mtd(chip);
3352         int length = mtd->oobsize;
3353         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3354         int eccsize = chip->ecc.size;
3355         uint8_t *bufpoi = chip->oob_poi;
3356         int i, toread, sndrnd = 0, pos, ret;
3357
3358         ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3359         if (ret)
3360                 return ret;
3361
3362         for (i = 0; i < chip->ecc.steps; i++) {
3363                 if (sndrnd) {
3364                         int ret;
3365
3366                         pos = eccsize + i * (eccsize + chunk);
3367                         if (mtd->writesize > 512)
3368                                 ret = nand_change_read_column_op(chip, pos,
3369                                                                  NULL, 0,
3370                                                                  false);
3371                         else
3372                                 ret = nand_read_page_op(chip, page, pos, NULL,
3373                                                         0);
3374
3375                         if (ret)
3376                                 return ret;
3377                 } else
3378                         sndrnd = 1;
3379                 toread = min_t(int, length, chunk);
3380
3381                 ret = nand_read_data_op(chip, bufpoi, toread, false);
3382                 if (ret)
3383                         return ret;
3384
3385                 bufpoi += toread;
3386                 length -= toread;
3387         }
3388         if (length > 0) {
3389                 ret = nand_read_data_op(chip, bufpoi, length, false);
3390                 if (ret)
3391                         return ret;
3392         }
3393
3394         return 0;
3395 }
3396
3397 /**
3398  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3399  * @chip: nand chip info structure
3400  * @page: page number to write
3401  */
3402 int nand_write_oob_std(struct nand_chip *chip, int page)
3403 {
3404         struct mtd_info *mtd = nand_to_mtd(chip);
3405
3406         return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3407                                  mtd->oobsize);
3408 }
3409 EXPORT_SYMBOL(nand_write_oob_std);
3410
3411 /**
3412  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3413  *                           with syndrome - only for large page flash
3414  * @chip: nand chip info structure
3415  * @page: page number to write
3416  */
3417 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3418 {
3419         struct mtd_info *mtd = nand_to_mtd(chip);
3420         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3421         int eccsize = chip->ecc.size, length = mtd->oobsize;
3422         int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3423         const uint8_t *bufpoi = chip->oob_poi;
3424
3425         /*
3426          * data-ecc-data-ecc ... ecc-oob
3427          * or
3428          * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3429          */
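        /*
         * Worked example with illustrative numbers (not taken from this
         * file): a 2048+64 byte page, eccsize = 512, four ECC steps and a
         * padded chunk of 16 bytes (prepad + ecc.bytes + postpad). The OOB
         * image in chip->oob_poi is then written in 16-byte pieces at
         * columns 512, 1040, 1568 and 2096, i.e. "infixed" between the data
         * chunks, and 64 - 4 * 16 = 0 bytes remain for the trailing write.
         */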
3430         if (!chip->ecc.prepad && !chip->ecc.postpad) {
3431                 pos = steps * (eccsize + chunk);
3432                 steps = 0;
3433         } else
3434                 pos = eccsize;
3435
3436         ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3437         if (ret)
3438                 return ret;
3439
3440         for (i = 0; i < steps; i++) {
3441                 if (sndcmd) {
3442                         if (mtd->writesize <= 512) {
3443                                 uint32_t fill = 0xFFFFFFFF;
3444
3445                                 len = eccsize;
3446                                 while (len > 0) {
3447                                         int num = min_t(int, len, 4);
3448
3449                                         ret = nand_write_data_op(chip, &fill,
3450                                                                  num, false);
3451                                         if (ret)
3452                                                 return ret;
3453
3454                                         len -= num;
3455                                 }
3456                         } else {
3457                                 pos = eccsize + i * (eccsize + chunk);
3458                                 ret = nand_change_write_column_op(chip, pos,
3459                                                                   NULL, 0,
3460                                                                   false);
3461                                 if (ret)
3462                                         return ret;
3463                         }
3464                 } else
3465                         sndcmd = 1;
3466                 len = min_t(int, length, chunk);
3467
3468                 ret = nand_write_data_op(chip, bufpoi, len, false);
3469                 if (ret)
3470                         return ret;
3471
3472                 bufpoi += len;
3473                 length -= len;
3474         }
3475         if (length > 0) {
3476                 ret = nand_write_data_op(chip, bufpoi, length, false);
3477                 if (ret)
3478                         return ret;
3479         }
3480
3481         return nand_prog_page_end_op(chip);
3482 }
3483
3484 /**
3485  * nand_do_read_oob - [INTERN] NAND read out-of-band
3486  * @chip: NAND chip object
3487  * @from: offset to read from
3488  * @ops: oob operations description structure
3489  *
3490  * NAND read out-of-band data from the spare area.
3491  */
3492 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3493                             struct mtd_oob_ops *ops)
3494 {
3495         struct mtd_info *mtd = nand_to_mtd(chip);
3496         unsigned int max_bitflips = 0;
3497         int page, realpage, chipnr;
3498         struct mtd_ecc_stats stats;
3499         int readlen = ops->ooblen;
3500         int len;
3501         uint8_t *buf = ops->oobbuf;
3502         int ret = 0;
3503
3504         pr_debug("%s: from = 0x%08Lx, len = %i\n",
3505                         __func__, (unsigned long long)from, readlen);
3506
3507         stats = mtd->ecc_stats;
3508
3509         len = mtd_oobavail(mtd, ops);
3510
3511         chipnr = (int)(from >> chip->chip_shift);
3512         nand_select_target(chip, chipnr);
3513
3514         /* Shift to get page */
3515         realpage = (int)(from >> chip->page_shift);
3516         page = realpage & chip->pagemask;
3517
3518         while (1) {
3519                 if (ops->mode == MTD_OPS_RAW)
3520                         ret = chip->ecc.read_oob_raw(chip, page);
3521                 else
3522                         ret = chip->ecc.read_oob(chip, page);
3523
3524                 if (ret < 0)
3525                         break;
3526
3527                 len = min(len, readlen);
3528                 buf = nand_transfer_oob(chip, buf, ops, len);
3529
3530                 nand_wait_readrdy(chip);
3531
3532                 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3533
3534                 readlen -= len;
3535                 if (!readlen)
3536                         break;
3537
3538                 /* Increment page address */
3539                 realpage++;
3540
3541                 page = realpage & chip->pagemask;
3542                 /* Check if we cross a chip boundary */
3543                 if (!page) {
3544                         chipnr++;
3545                         nand_deselect_target(chip);
3546                         nand_select_target(chip, chipnr);
3547                 }
3548         }
3549         nand_deselect_target(chip);
3550
3551         ops->oobretlen = ops->ooblen - readlen;
3552
3553         if (ret < 0)
3554                 return ret;
3555
3556         if (mtd->ecc_stats.failed - stats.failed)
3557                 return -EBADMSG;
3558
3559         return max_bitflips;
3560 }
3561
3562 /**
3563  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3564  * @mtd: MTD device structure
3565  * @from: offset to read from
3566  * @ops: oob operation description structure
3567  *
3568  * NAND read data and/or out-of-band data.
3569  */
3570 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3571                          struct mtd_oob_ops *ops)
3572 {
3573         struct nand_chip *chip = mtd_to_nand(mtd);
3574         int ret;
3575
3576         ops->retlen = 0;
3577
3578         if (ops->mode != MTD_OPS_PLACE_OOB &&
3579             ops->mode != MTD_OPS_AUTO_OOB &&
3580             ops->mode != MTD_OPS_RAW)
3581                 return -ENOTSUPP;
3582
3583         nand_get_device(chip, FL_READING);
3584
3585         if (!ops->datbuf)
3586                 ret = nand_do_read_oob(chip, from, ops);
3587         else
3588                 ret = nand_do_read_ops(chip, from, ops);
3589
3590         nand_release_device(chip);
3591         return ret;
3592 }
3593
3594 /**
3595  * nand_write_page_raw_notsupp - dummy raw page write function
3596  * @chip: nand chip info structure
3597  * @buf: data buffer
3598  * @oob_required: must write chip->oob_poi to OOB
3599  * @page: page number to write
3600  *
3601  * Returns -ENOTSUPP unconditionally.
3602  */
3603 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3604                                 int oob_required, int page)
3605 {
3606         return -ENOTSUPP;
3607 }
3608
3609 /**
3610  * nand_write_page_raw - [INTERN] raw page write function
3611  * @chip: nand chip info structure
3612  * @buf: data buffer
3613  * @oob_required: must write chip->oob_poi to OOB
3614  * @page: page number to write
3615  *
3616  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3617  */
3618 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3619                         int oob_required, int page)
3620 {
3621         struct mtd_info *mtd = nand_to_mtd(chip);
3622         int ret;
3623
3624         ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3625         if (ret)
3626                 return ret;
3627
3628         if (oob_required) {
3629                 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3630                                          false);
3631                 if (ret)
3632                         return ret;
3633         }
3634
3635         return nand_prog_page_end_op(chip);
3636 }
3637 EXPORT_SYMBOL(nand_write_page_raw);
3638
3639 /**
3640  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3641  * @chip: nand chip info structure
3642  * @buf: data buffer
3643  * @oob_required: must write chip->oob_poi to OOB
3644  * @page: page number to write
3645  *
3646  * We need a special oob layout and handling even when ECC isn't checked.
3647  */
3648 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3649                                         const uint8_t *buf, int oob_required,
3650                                         int page)
3651 {
3652         struct mtd_info *mtd = nand_to_mtd(chip);
3653         int eccsize = chip->ecc.size;
3654         int eccbytes = chip->ecc.bytes;
3655         uint8_t *oob = chip->oob_poi;
3656         int steps, size, ret;
3657
3658         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3659         if (ret)
3660                 return ret;
3661
3662         for (steps = chip->ecc.steps; steps > 0; steps--) {
3663                 ret = nand_write_data_op(chip, buf, eccsize, false);
3664                 if (ret)
3665                         return ret;
3666
3667                 buf += eccsize;
3668
3669                 if (chip->ecc.prepad) {
3670                         ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3671                                                  false);
3672                         if (ret)
3673                                 return ret;
3674
3675                         oob += chip->ecc.prepad;
3676                 }
3677
3678                 ret = nand_write_data_op(chip, oob, eccbytes, false);
3679                 if (ret)
3680                         return ret;
3681
3682                 oob += eccbytes;
3683
3684                 if (chip->ecc.postpad) {
3685                         ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3686                                                  false);
3687                         if (ret)
3688                                 return ret;
3689
3690                         oob += chip->ecc.postpad;
3691                 }
3692         }
3693
3694         size = mtd->oobsize - (oob - chip->oob_poi);
3695         if (size) {
3696                 ret = nand_write_data_op(chip, oob, size, false);
3697                 if (ret)
3698                         return ret;
3699         }
3700
3701         return nand_prog_page_end_op(chip);
3702 }
3703 /**
3704  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3705  * @chip: nand chip info structure
3706  * @buf: data buffer
3707  * @oob_required: must write chip->oob_poi to OOB
3708  * @page: page number to write
3709  */
3710 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3711                                  int oob_required, int page)
3712 {
3713         struct mtd_info *mtd = nand_to_mtd(chip);
3714         int i, eccsize = chip->ecc.size, ret;
3715         int eccbytes = chip->ecc.bytes;
3716         int eccsteps = chip->ecc.steps;
3717         uint8_t *ecc_calc = chip->ecc.calc_buf;
3718         const uint8_t *p = buf;
3719
3720         /* Software ECC calculation */
3721         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3722                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3723
3724         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3725                                          chip->ecc.total);
3726         if (ret)
3727                 return ret;
3728
3729         return chip->ecc.write_page_raw(chip, buf, 1, page);
3730 }
3731
3732 /**
3733  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3734  * @chip: nand chip info structure
3735  * @buf: data buffer
3736  * @oob_required: must write chip->oob_poi to OOB
3737  * @page: page number to write
3738  */
3739 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3740                                  int oob_required, int page)
3741 {
3742         struct mtd_info *mtd = nand_to_mtd(chip);
3743         int i, eccsize = chip->ecc.size, ret;
3744         int eccbytes = chip->ecc.bytes;
3745         int eccsteps = chip->ecc.steps;
3746         uint8_t *ecc_calc = chip->ecc.calc_buf;
3747         const uint8_t *p = buf;
3748
3749         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3750         if (ret)
3751                 return ret;
3752
3753         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3754                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3755
3756                 ret = nand_write_data_op(chip, p, eccsize, false);
3757                 if (ret)
3758                         return ret;
3759
3760                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3761         }
3762
3763         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3764                                          chip->ecc.total);
3765         if (ret)
3766                 return ret;
3767
3768         ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3769         if (ret)
3770                 return ret;
3771
3772         return nand_prog_page_end_op(chip);
3773 }
3774
3775
3776 /**
3777  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3778  * @chip:       nand chip info structure
3779  * @offset:     column address of subpage within the page
3780  * @data_len:   data length
3781  * @buf:        data buffer
3782  * @oob_required: must write chip->oob_poi to OOB
3783  * @page: page number to write
3784  */
3785 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
3786                                     uint32_t data_len, const uint8_t *buf,
3787                                     int oob_required, int page)
3788 {
3789         struct mtd_info *mtd = nand_to_mtd(chip);
3790         uint8_t *oob_buf  = chip->oob_poi;
3791         uint8_t *ecc_calc = chip->ecc.calc_buf;
3792         int ecc_size      = chip->ecc.size;
3793         int ecc_bytes     = chip->ecc.bytes;
3794         int ecc_steps     = chip->ecc.steps;
3795         uint32_t start_step = offset / ecc_size;
3796         uint32_t end_step   = (offset + data_len - 1) / ecc_size;
3797         int oob_bytes       = mtd->oobsize / ecc_steps;
3798         int step, ret;
3799
3800         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3801         if (ret)
3802                 return ret;
3803
3804         for (step = 0; step < ecc_steps; step++) {
3805                 /* configure controller for WRITE access */
3806                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3807
3808                 /* write data (untouched subpages already masked by 0xFF) */
3809                 ret = nand_write_data_op(chip, buf, ecc_size, false);
3810                 if (ret)
3811                         return ret;
3812
3813                 /* mask ECC of un-touched subpages by padding 0xFF */
3814                 if ((step < start_step) || (step > end_step))
3815                         memset(ecc_calc, 0xff, ecc_bytes);
3816                 else
3817                         chip->ecc.calculate(chip, buf, ecc_calc);
3818
3819                 /* mask OOB of un-touched subpages by padding 0xFF */
3820                 /* if oob_required, preserve OOB metadata of written subpage */
3821                 if (!oob_required || (step < start_step) || (step > end_step))
3822                         memset(oob_buf, 0xff, oob_bytes);
3823
3824                 buf += ecc_size;
3825                 ecc_calc += ecc_bytes;
3826                 oob_buf  += oob_bytes;
3827         }
3828
3829         /* copy calculated ECC for the whole page to chip->oob_poi */
3830         /* this includes the masked value (0xFF) for unwritten subpages */
3831         ecc_calc = chip->ecc.calc_buf;
3832         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3833                                          chip->ecc.total);
3834         if (ret)
3835                 return ret;
3836
3837         /* write OOB buffer to NAND device */
3838         ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3839         if (ret)
3840                 return ret;
3841
3842         return nand_prog_page_end_op(chip);
3843 }
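/*
 * Worked example (illustrative geometry, not taken from this file): a 2048
 * byte page with ecc.size = 512, ecc.steps = 4 and a 64 byte OOB, so
 * oob_bytes = 16 per step. Writing data_len = 512 at offset = 512 gives
 * start_step = end_step = 1: step 1 gets real calculated ECC, while steps
 * 0, 2 and 3 get 0xff ECC (and, unless oob_required, 0xff OOB), leaving the
 * untouched subpages unprogrammed.
 */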
3844
3845
3846 /**
3847  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3848  * @chip: nand chip info structure
3849  * @buf: data buffer
3850  * @oob_required: must write chip->oob_poi to OOB
3851  * @page: page number to write
3852  *
3853  * The hw generator calculates the error syndrome automatically. Therefore we
3854  * need a special oob layout and handling.
3855  */
3856 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
3857                                     int oob_required, int page)
3858 {
3859         struct mtd_info *mtd = nand_to_mtd(chip);
3860         int i, eccsize = chip->ecc.size;
3861         int eccbytes = chip->ecc.bytes;
3862         int eccsteps = chip->ecc.steps;
3863         const uint8_t *p = buf;
3864         uint8_t *oob = chip->oob_poi;
3865         int ret;
3866
3867         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3868         if (ret)
3869                 return ret;
3870
3871         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3872                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3873
3874                 ret = nand_write_data_op(chip, p, eccsize, false);
3875                 if (ret)
3876                         return ret;
3877
3878                 if (chip->ecc.prepad) {
3879                         ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3880                                                  false);
3881                         if (ret)
3882                                 return ret;
3883
3884                         oob += chip->ecc.prepad;
3885                 }
3886
3887                 chip->ecc.calculate(chip, p, oob);
3888
3889                 ret = nand_write_data_op(chip, oob, eccbytes, false);
3890                 if (ret)
3891                         return ret;
3892
3893                 oob += eccbytes;
3894
3895                 if (chip->ecc.postpad) {
3896                         ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3897                                                  false);
3898                         if (ret)
3899                                 return ret;
3900
3901                         oob += chip->ecc.postpad;
3902                 }
3903         }
3904
3905         /* Calculate remaining oob bytes */
3906         i = mtd->oobsize - (oob - chip->oob_poi);
3907         if (i) {
3908                 ret = nand_write_data_op(chip, oob, i, false);
3909                 if (ret)
3910                         return ret;
3911         }
3912
3913         return nand_prog_page_end_op(chip);
3914 }
3915
3916 /**
3917  * nand_write_page - write one page
3918  * @chip: NAND chip descriptor
3919  * @offset: address offset within the page
3920  * @data_len: length of actual data to be written
3921  * @buf: the data to write
3922  * @oob_required: must write chip->oob_poi to OOB
3923  * @page: page number to write
3924  * @raw: use _raw version of write_page
3925  */
3926 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3927                            int data_len, const uint8_t *buf, int oob_required,
3928                            int page, int raw)
3929 {
3930         struct mtd_info *mtd = nand_to_mtd(chip);
3931         int status, subpage;
3932
3933         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3934                 chip->ecc.write_subpage)
3935                 subpage = offset || (data_len < mtd->writesize);
3936         else
3937                 subpage = 0;
3938
3939         if (unlikely(raw))
3940                 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3941                                                   page);
3942         else if (subpage)
3943                 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3944                                                  oob_required, page);
3945         else
3946                 status = chip->ecc.write_page(chip, buf, oob_required, page);
3947
3948         if (status < 0)
3949                 return status;
3950
3951         return 0;
3952 }
3953
3954 #define NOTALIGNED(x)   ((x & (chip->subpagesize - 1)) != 0)
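/*
 * Example (illustrative values): with chip->subpagesize = 512,
 * NOTALIGNED(0x400) is 0 (aligned) while NOTALIGNED(0x2ff) is non-zero,
 * since 0x2ff & 0x1ff = 0xff.
 */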
3955
3956 /**
3957  * nand_do_write_ops - [INTERN] NAND write with ECC
3958  * @chip: NAND chip object
3959  * @to: offset to write to
3960  * @ops: oob operations description structure
3961  *
3962  * NAND write with ECC.
3963  */
3964 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
3965                              struct mtd_oob_ops *ops)
3966 {
3967         struct mtd_info *mtd = nand_to_mtd(chip);
3968         int chipnr, realpage, page, column;
3969         uint32_t writelen = ops->len;
3970
3971         uint32_t oobwritelen = ops->ooblen;
3972         uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3973
3974         uint8_t *oob = ops->oobbuf;
3975         uint8_t *buf = ops->datbuf;
3976         int ret;
3977         int oob_required = oob ? 1 : 0;
3978
3979         ops->retlen = 0;
3980         if (!writelen)
3981                 return 0;
3982
3983         /* Reject writes which are not page aligned */
3984         if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
3985                 pr_notice("%s: attempt to write non page aligned data\n",
3986                            __func__);
3987                 return -EINVAL;
3988         }
3989
3990         column = to & (mtd->writesize - 1);
3991
3992         chipnr = (int)(to >> chip->chip_shift);
3993         nand_select_target(chip, chipnr);
3994
3995         /* Check if it is write protected */
3996         if (nand_check_wp(chip)) {
3997                 ret = -EIO;
3998                 goto err_out;
3999         }
4000
4001         realpage = (int)(to >> chip->page_shift);
4002         page = realpage & chip->pagemask;
4003
4004         /* Invalidate the page cache when we write to the cached page */
4005         if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
4006             ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
4007                 chip->pagebuf = -1;
4008
4009         /* Don't allow multipage oob writes with offset */
4010         if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4011                 ret = -EINVAL;
4012                 goto err_out;
4013         }
4014
4015         while (1) {
4016                 int bytes = mtd->writesize;
4017                 uint8_t *wbuf = buf;
4018                 int use_bufpoi;
4019                 int part_pagewr = (column || writelen < mtd->writesize);
4020
4021                 if (part_pagewr)
4022                         use_bufpoi = 1;
4023                 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4024                         use_bufpoi = !virt_addr_valid(buf) ||
4025                                      !IS_ALIGNED((unsigned long)buf,
4026                                                  chip->buf_align);
4027                 else
4028                         use_bufpoi = 0;
4029
4030                 /* Partial page write, or need to use bounce buffer? */
4031                 if (use_bufpoi) {
4032                         pr_debug("%s: using write bounce buffer for buf@%p\n",
4033                                          __func__, buf);
4034                         if (part_pagewr)
4035                                 bytes = min_t(int, bytes - column, writelen);
4036                         chip->pagebuf = -1;
4037                         memset(chip->data_buf, 0xff, mtd->writesize);
4038                         memcpy(&chip->data_buf[column], buf, bytes);
4039                         wbuf = chip->data_buf;
4040                 }
4041
4042                 if (unlikely(oob)) {
4043                         size_t len = min(oobwritelen, oobmaxlen);
4044                         oob = nand_fill_oob(chip, oob, len, ops);
4045                         oobwritelen -= len;
4046                 } else {
4047                         /* We still need to erase leftover OOB data */
4048                         memset(chip->oob_poi, 0xff, mtd->oobsize);
4049                 }
4050
4051                 ret = nand_write_page(chip, column, bytes, wbuf,
4052                                       oob_required, page,
4053                                       (ops->mode == MTD_OPS_RAW));
4054                 if (ret)
4055                         break;
4056
4057                 writelen -= bytes;
4058                 if (!writelen)
4059                         break;
4060
4061                 column = 0;
4062                 buf += bytes;
4063                 realpage++;
4064
4065                 page = realpage & chip->pagemask;
4066                 /* Check if we cross a chip boundary */
4067                 if (!page) {
4068                         chipnr++;
4069                         nand_deselect_target(chip);
4070                         nand_select_target(chip, chipnr);
4071                 }
4072         }
4073
4074         ops->retlen = ops->len - writelen;
4075         if (unlikely(oob))
4076                 ops->oobretlen = ops->ooblen;
4077
4078 err_out:
4079         nand_deselect_target(chip);
4080         return ret;
4081 }
4082
4083 /**
4084  * panic_nand_write - [MTD Interface] NAND write with ECC
4085  * @mtd: MTD device structure
4086  * @to: offset to write to
4087  * @len: number of bytes to write
4088  * @retlen: pointer to variable to store the number of written bytes
4089  * @buf: the data to write
4090  *
4091  * NAND write with ECC. Used when performing writes in interrupt context, this
4092  * may for example be called by mtdoops when writing an oops while in panic.
4093  */
4094 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4095                             size_t *retlen, const uint8_t *buf)
4096 {
4097         struct nand_chip *chip = mtd_to_nand(mtd);
4098         int chipnr = (int)(to >> chip->chip_shift);
4099         struct mtd_oob_ops ops;
4100         int ret;
4101
4102         /* Grab the device */
4103         panic_nand_get_device(chip, FL_WRITING);
4104
4105         nand_select_target(chip, chipnr);
4106
4107         /* Wait for the device to get ready */
4108         panic_nand_wait(chip, 400);
4109
4110         memset(&ops, 0, sizeof(ops));
4111         ops.len = len;
4112         ops.datbuf = (uint8_t *)buf;
4113         ops.mode = MTD_OPS_PLACE_OOB;
4114
4115         ret = nand_do_write_ops(chip, to, &ops);
4116
4117         *retlen = ops.retlen;
4118         return ret;
4119 }
4120
4121 /**
4122  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4123  * @mtd: MTD device structure
4124  * @to: offset to write to
4125  * @ops: oob operation description structure
4126  */
4127 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4128                           struct mtd_oob_ops *ops)
4129 {
4130         struct nand_chip *chip = mtd_to_nand(mtd);
4131         int ret = -ENOTSUPP;
4132
4133         ops->retlen = 0;
4134
4135         nand_get_device(chip, FL_WRITING);
4136
4137         switch (ops->mode) {
4138         case MTD_OPS_PLACE_OOB:
4139         case MTD_OPS_AUTO_OOB:
4140         case MTD_OPS_RAW:
4141                 break;
4142
4143         default:
4144                 goto out;
4145         }
4146
4147         if (!ops->datbuf)
4148                 ret = nand_do_write_oob(chip, to, ops);
4149         else
4150                 ret = nand_do_write_ops(chip, to, ops);
4151
4152 out:
4153         nand_release_device(chip);
4154         return ret;
4155 }
4156
4157 /**
4158  * single_erase - [GENERIC] NAND standard block erase command function
4159  * @chip: NAND chip object
4160  * @page: the page address of the block which will be erased
4161  *
4162  * Standard erase command for NAND chips. Returns NAND status.
4163  */
4164 static int single_erase(struct nand_chip *chip, int page)
4165 {
4166         unsigned int eraseblock;
4167
4168         /* Send commands to erase a block */
4169         eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
4170
4171         return nand_erase_op(chip, eraseblock);
4172 }
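/*
 * Example (illustrative geometry): with 2 KiB pages (page_shift = 11) and
 * 128 KiB blocks (phys_erase_shift = 17) there are 64 pages per block, so
 * page 640 maps to eraseblock 640 >> 6 = 10.
 */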
4173
4174 /**
4175  * nand_erase - [MTD Interface] erase block(s)
4176  * @mtd: MTD device structure
4177  * @instr: erase instruction
4178  *
4179  * Erase one or more blocks.
4180  */
4181 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4182 {
4183         return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4184 }
4185
4186 /**
4187  * nand_erase_nand - [INTERN] erase block(s)
4188  * @chip: NAND chip object
4189  * @instr: erase instruction
4190  * @allowbbt: allow erasing the bbt area
4191  *
4192  * Erase one or more blocks.
4193  */
4194 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4195                     int allowbbt)
4196 {
4197         int page, status, pages_per_block, ret, chipnr;
4198         loff_t len;
4199
4200         pr_debug("%s: start = 0x%012llx, len = %llu\n",
4201                         __func__, (unsigned long long)instr->addr,
4202                         (unsigned long long)instr->len);
4203
4204         if (check_offs_len(chip, instr->addr, instr->len))
4205                 return -EINVAL;
4206
4207         /* Grab the lock and see if the device is available */
4208         nand_get_device(chip, FL_ERASING);
4209
4210         /* Shift to get first page */
4211         page = (int)(instr->addr >> chip->page_shift);
4212         chipnr = (int)(instr->addr >> chip->chip_shift);
4213
4214         /* Calculate pages in each block */
4215         pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4216
4217         /* Select the NAND device */
4218         nand_select_target(chip, chipnr);
4219
4220         /* Check if it is write protected */
4221         if (nand_check_wp(chip)) {
4222                 pr_debug("%s: device is write protected!\n",
4223                                 __func__);
4224                 ret = -EIO;
4225                 goto erase_exit;
4226         }
4227
4228         /* Loop through the pages */
4229         len = instr->len;
4230
4231         while (len) {
4232                 /* Check if we have a bad block, we do not erase bad blocks! */
4233                 if (nand_block_checkbad(chip, ((loff_t) page) <<
4234                                         chip->page_shift, allowbbt)) {
4235                         pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4236                                     __func__, page);
4237                         ret = -EIO;
4238                         goto erase_exit;
4239                 }
4240
4241                 /*
4242                  * Invalidate the page cache if we erase the block which
4243                  * contains the current cached page.
4244                  */
4245                 if (page <= chip->pagebuf && chip->pagebuf <
4246                     (page + pages_per_block))
4247                         chip->pagebuf = -1;
4248
4249                 if (chip->legacy.erase)
4250                         status = chip->legacy.erase(chip,
4251                                                     page & chip->pagemask);
4252                 else
4253                         status = single_erase(chip, page & chip->pagemask);
4254
4255                 /* See if block erase succeeded */
4256                 if (status) {
4257                         pr_debug("%s: failed erase, page 0x%08x\n",
4258                                         __func__, page);
4259                         ret = -EIO;
4260                         instr->fail_addr =
4261                                 ((loff_t)page << chip->page_shift);
4262                         goto erase_exit;
4263                 }
4264
4265                 /* Increment page address and decrement length */
4266                 len -= (1ULL << chip->phys_erase_shift);
4267                 page += pages_per_block;
4268
4269                 /* Check if we cross a chip boundary */
4270                 if (len && !(page & chip->pagemask)) {
4271                         chipnr++;
4272                         nand_deselect_target(chip);
4273                         nand_select_target(chip, chipnr);
4274                 }
4275         }
4276
4277         ret = 0;
4278 erase_exit:
4279
4280         /* Deselect and wake up anyone waiting on the device */
4281         nand_deselect_target(chip);
4282         nand_release_device(chip);
4283
4284         /* Return more or less happy */
4285         return ret;
4286 }
4287
4288 /**
4289  * nand_sync - [MTD Interface] sync
4290  * @mtd: MTD device structure
4291  *
4292  * Sync is actually just a wait for the chip to become ready.
4293  */
4294 static void nand_sync(struct mtd_info *mtd)
4295 {
4296         struct nand_chip *chip = mtd_to_nand(mtd);
4297
4298         pr_debug("%s: called\n", __func__);
4299
4300         /* Grab the lock and see if the device is available */
4301         nand_get_device(chip, FL_SYNCING);
4302         /* Release it and go back */
4303         nand_release_device(chip);
4304 }
4305
4306 /**
4307  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4308  * @mtd: MTD device structure
4309  * @offs: offset relative to mtd start
4310  */
4311 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4312 {
4313         struct nand_chip *chip = mtd_to_nand(mtd);
4314         int chipnr = (int)(offs >> chip->chip_shift);
4315         int ret;
4316
4317         /* Select the NAND device */
4318         nand_get_device(chip, FL_READING);
4319         nand_select_target(chip, chipnr);
4320
4321         ret = nand_block_checkbad(chip, offs, 0);
4322
4323         nand_deselect_target(chip);
4324         nand_release_device(chip);
4325
4326         return ret;
4327 }
4328
4329 /**
4330  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4331  * @mtd: MTD device structure
4332  * @ofs: offset relative to mtd start
4333  */
4334 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4335 {
4336         int ret;
4337
4338         ret = nand_block_isbad(mtd, ofs);
4339         if (ret) {
4340                 /* If it was bad already, return success and do nothing */
4341                 if (ret > 0)
4342                         return 0;
4343                 return ret;
4344         }
4345
4346         return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4347 }
4348
4349 /**
4350  * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
4351  * @mtd: MTD device structure
4352  * @ofs: offset relative to mtd start
4353  * @len: length of mtd
4354  */
4355 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4356 {
4357         struct nand_chip *chip = mtd_to_nand(mtd);
4358         u32 part_start_block;
4359         u32 part_end_block;
4360         u32 part_start_die;
4361         u32 part_end_die;
4362
4363         /*
4364          * max_bb_per_die and blocks_per_die are used to determine
4365          * the maximum bad block count.
4366          */
4367         if (!chip->max_bb_per_die || !chip->blocks_per_die)
4368                 return -ENOTSUPP;
4369
4370         /* Get the start and end of the partition in erase blocks. */
4371         part_start_block = mtd_div_by_eb(ofs, mtd);
4372         part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4373
4374         /* Get the start and end LUNs of the partition. */
4375         part_start_die = part_start_block / chip->blocks_per_die;
4376         part_end_die = part_end_block / chip->blocks_per_die;
4377
4378         /*
4379          * Look up the bad blocks per unit and multiply by the number of units
4380          * that the partition spans.
4381          */
4382         return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
4383 }
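/*
 * Worked example (hypothetical values): max_bb_per_die = 40 and
 * blocks_per_die = 1024. A partition starting at block 0 and spanning 2048
 * erase blocks covers dies 0 and 1, so the reported maximum is
 * 40 * (1 - 0 + 1) = 80 bad blocks.
 */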
4384
4385 /**
4386  * nand_suspend - [MTD Interface] Suspend the NAND flash
4387  * @mtd: MTD device structure
4388  */
4389 static int nand_suspend(struct mtd_info *mtd)
4390 {
4391         return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
4392 }
4393
4394 /**
4395  * nand_resume - [MTD Interface] Resume the NAND flash
4396  * @mtd: MTD device structure
4397  */
4398 static void nand_resume(struct mtd_info *mtd)
4399 {
4400         struct nand_chip *chip = mtd_to_nand(mtd);
4401
4402         if (chip->state == FL_PM_SUSPENDED)
4403                 nand_release_device(chip);
4404         else
4405                 pr_err("%s called for a chip which is not in suspended state\n",
4406                         __func__);
4407 }
4408
4409 /**
4410  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4411  *                 prevent further operations
4412  * @mtd: MTD device structure
4413  */
4414 static void nand_shutdown(struct mtd_info *mtd)
4415 {
4416         nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
4417 }
4418
4419 /* Set default functions */
4420 static void nand_set_defaults(struct nand_chip *chip)
4421 {
4422         /* If no controller is provided, use the dummy, legacy one. */
4423         if (!chip->controller) {
4424                 chip->controller = &chip->legacy.dummy_controller;
4425                 nand_controller_init(chip->controller);
4426         }
4427
4428         nand_legacy_set_defaults(chip);
4429
4430         if (!chip->buf_align)
4431                 chip->buf_align = 1;
4432 }
4433
4434 /* Sanitize ONFI strings so we can safely print them */
4435 void sanitize_string(uint8_t *s, size_t len)
4436 {
4437         ssize_t i;
4438
4439         /* Null terminate */
4440         s[len - 1] = 0;
4441
4442         /* Remove non printable chars */
4443         for (i = 0; i < len - 1; i++) {
4444                 if (s[i] < ' ' || s[i] > 127)
4445                         s[i] = '?';
4446         }
4447
4448         /* Remove trailing spaces */
4449         strim(s);
4450 }
4451
4452 /*
4453  * nand_id_has_period - Check if an ID string has a given wraparound period
4454  * @id_data: the ID string
4455  * @arrlen: the length of the @id_data array
4456  * @period: the period of repetition
4457  *
4458  * Check if an ID string is repeated within a given sequence of bytes at a
4459  * specific repetition interval, @period (e.g., {0x20,0x01,0x7F,0x20} has a
4460  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4461  * if the repetition has a period of @period; otherwise, returns zero.
4462  */
4463 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4464 {
4465         int i, j;
4466         for (i = 0; i < period; i++)
4467                 for (j = i + period; j < arrlen; j += period)
4468                         if (id_data[i] != id_data[j])
4469                                 return 0;
4470         return 1;
4471 }
4472
4473 /*
4474  * nand_id_len - Get the length of an ID string returned by CMD_READID
4475  * @id_data: the ID string
4476  * @arrlen: the length of the @id_data array
4477  *
4478  * Returns the length of the ID string, according to known wraparound/trailing
4479  * zero patterns. If no pattern exists, returns the length of the array.
4480  */
4481 static int nand_id_len(u8 *id_data, int arrlen)
4482 {
4483         int last_nonzero, period;
4484
4485         /* Find last non-zero byte */
4486         for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4487                 if (id_data[last_nonzero])
4488                         break;
4489
4490         /* All zeros */
4491         if (last_nonzero < 0)
4492                 return 0;
4493
4494         /* Calculate wraparound period */
4495         for (period = 1; period < arrlen; period++)
4496                 if (nand_id_has_period(id_data, arrlen, period))
4497                         break;
4498
4499         /* There's a repeated pattern */
4500         if (period < arrlen)
4501                 return period;
4502
4503         /* There are trailing zeros */
4504         if (last_nonzero < arrlen - 1)
4505                 return last_nonzero + 1;
4506
4507         /* No pattern detected */
4508         return arrlen;
4509 }
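
/*
 * Example with made-up ID bytes: {0xC2, 0xF1, 0x80, 0x95, 0x02, 0x00, 0x00,
 * 0x00} has no wraparound period shorter than the array, but the three
 * trailing zeros mean the reported ID length is 5.
 */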
4510
4511 /* Extract the bits per cell from the 3rd byte of the extended ID */
4512 static int nand_get_bits_per_cell(u8 cellinfo)
4513 {
4514         int bits;
4515
4516         bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4517         bits >>= NAND_CI_CELLTYPE_SHIFT;
4518         return bits + 1;
4519 }
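
/*
 * With the usual NAND_CI_CELLTYPE_MSK/NAND_CI_CELLTYPE_SHIFT definitions, a
 * cell-type field of 0b00 yields 1 bit per cell (SLC) and 0b01 yields 2 bits
 * per cell (MLC).
 */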
4520
4521 /*
4522  * Many new NAND chips share similar device ID codes, which represent the size of the
4523  * chip. The rest of the parameters must be decoded according to generic or
4524  * manufacturer-specific "extended ID" decoding patterns.
4525  */
4526 void nand_decode_ext_id(struct nand_chip *chip)
4527 {
4528         struct mtd_info *mtd = nand_to_mtd(chip);
4529         int extid;
4530         u8 *id_data = chip->id.data;
4531         /* The 3rd id byte holds MLC / multichip data */
4532         chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4533         /* The 4th id byte is the important one */
4534         extid = id_data[3];
4535
4536         /* Calc pagesize */
4537         mtd->writesize = 1024 << (extid & 0x03);
4538         extid >>= 2;
4539         /* Calc oobsize */
4540         mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4541         extid >>= 2;
4542         /* Calc blocksize. Blocksize is in multiples of 64KiB */
4543         mtd->erasesize = (64 * 1024) << (extid & 0x03);
4544         extid >>= 2;
4545         /* Get buswidth information */
4546         if (extid & 0x1)
4547                 chip->options |= NAND_BUSWIDTH_16;
4548 }
4549 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
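
/*
 * As an illustration, for a hypothetical 4th ID byte of 0x95 (0b10010101) the
 * decoding above gives: writesize = 1024 << 1 = 2048, oobsize =
 * (8 << 1) * (2048 >> 9) = 64, erasesize = 64KiB << 1 = 128KiB, and an
 * 8-bit bus (the buswidth bit is cleared).
 */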
4550
4551 /*
4552  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4553  * decodes a matching ID table entry and assigns the MTD size parameters for
4554  * the chip.
4555  */
4556 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4557 {
4558         struct mtd_info *mtd = nand_to_mtd(chip);
4559
4560         mtd->erasesize = type->erasesize;
4561         mtd->writesize = type->pagesize;
4562         mtd->oobsize = mtd->writesize / 32;
4563
4564         /* All legacy ID NAND are small-page, SLC */
4565         chip->bits_per_cell = 1;
4566 }
4567
4568 /*
4569  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4570  * heuristic patterns using various detected parameters (e.g., manufacturer,
4571  * page size, cell-type information).
4572  */
4573 static void nand_decode_bbm_options(struct nand_chip *chip)
4574 {
4575         struct mtd_info *mtd = nand_to_mtd(chip);
4576
4577         /* Set the bad block position */
4578         if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4579                 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
4580         else
4581                 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
4582 }
4583
4584 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4585 {
4586         return type->id_len;
4587 }
4588
4589 static bool find_full_id_nand(struct nand_chip *chip,
4590                               struct nand_flash_dev *type)
4591 {
4592         struct mtd_info *mtd = nand_to_mtd(chip);
4593         u8 *id_data = chip->id.data;
4594
4595         if (!strncmp(type->id, id_data, type->id_len)) {
4596                 mtd->writesize = type->pagesize;
4597                 mtd->erasesize = type->erasesize;
4598                 mtd->oobsize = type->oobsize;
4599
4600                 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4601                 chip->chipsize = (uint64_t)type->chipsize << 20;
4602                 chip->options |= type->options;
4603                 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
4604                 chip->ecc_step_ds = NAND_ECC_STEP(type);
4605                 chip->onfi_timing_mode_default =
4606                                         type->onfi_timing_mode_default;
4607
4608                 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4609                 if (!chip->parameters.model)
4610                         return false;
4611
4612                 return true;
4613         }
4614         return false;
4615 }
4616
4617 /*
4618  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4619  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4620  * table.
4621  */
4622 static void nand_manufacturer_detect(struct nand_chip *chip)
4623 {
4624         /*
4625          * Try manufacturer detection if available and use
4626          * nand_decode_ext_id() otherwise.
4627          */
4628         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4629             chip->manufacturer.desc->ops->detect) {
4630                 /* The 3rd id byte holds MLC / multichip data */
4631                 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4632                 chip->manufacturer.desc->ops->detect(chip);
4633         } else {
4634                 nand_decode_ext_id(chip);
4635         }
4636 }
4637
4638 /*
4639  * Manufacturer initialization. This function is called for all NANDs including
4640  * ONFI and JEDEC compliant ones.
4641  * Manufacturer drivers should put all their specific initialization code in
4642  * their ->init() hook.
4643  */
4644 static int nand_manufacturer_init(struct nand_chip *chip)
4645 {
4646         if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4647             !chip->manufacturer.desc->ops->init)
4648                 return 0;
4649
4650         return chip->manufacturer.desc->ops->init(chip);
4651 }
4652
4653 /*
4654  * Manufacturer cleanup. This function is called for all NANDs including
4655  * ONFI and JEDEC compliant ones.
4656  * Manufacturer drivers should put all their specific cleanup code in their
4657  * ->cleanup() hook.
4658  */
4659 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4660 {
4661         /* Release manufacturer private data */
4662         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4663             chip->manufacturer.desc->ops->cleanup)
4664                 chip->manufacturer.desc->ops->cleanup(chip);
4665 }
4666
4667 static const char *
4668 nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4669 {
4670         return manufacturer ? manufacturer->name : "Unknown";
4671 }
4672
4673 /*
4674  * Get the flash and manufacturer id and lookup if the type is supported.
4675  */
4676 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4677 {
4678         const struct nand_manufacturer *manufacturer;
4679         struct mtd_info *mtd = nand_to_mtd(chip);
4680         int busw, ret;
4681         u8 *id_data = chip->id.data;
4682         u8 maf_id, dev_id;
4683
4684         /*
4685          * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4686          * after power-up.
4687          */
4688         ret = nand_reset(chip, 0);
4689         if (ret)
4690                 return ret;
4691
4692         /* Select the device */
4693         nand_select_target(chip, 0);
4694
4695         /* Send the command for reading device ID */
4696         ret = nand_readid_op(chip, 0, id_data, 2);
4697         if (ret)
4698                 return ret;
4699
4700         /* Read manufacturer and device IDs */
4701         maf_id = id_data[0];
4702         dev_id = id_data[1];
4703
4704         /*
4705          * Try again to make sure, as on some systems bus-hold or other
4706          * interface concerns can cause random data that looks like a
4707          * possibly credible NAND flash. If the two results do
4708          * not match, ignore the device completely.
4709          */
4710
4711         /* Read entire ID string */
4712         ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
4713         if (ret)
4714                 return ret;
4715
4716         if (id_data[0] != maf_id || id_data[1] != dev_id) {
4717                 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4718                         maf_id, dev_id, id_data[0], id_data[1]);
4719                 return -ENODEV;
4720         }
4721
4722         chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
4723
4724         /* Try to identify manufacturer */
4725         manufacturer = nand_get_manufacturer(maf_id);
4726         chip->manufacturer.desc = manufacturer;
4727
4728         if (!type)
4729                 type = nand_flash_ids;
4730
4731         /*
4732          * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4733          * override it.
4734          * This is required to make sure initial NAND bus width set by the
4735          * NAND controller driver is coherent with the real NAND bus width
4736          * (extracted by auto-detection code).
4737          */
4738         busw = chip->options & NAND_BUSWIDTH_16;
4739
4740         /*
4741          * The flag is only set (never cleared), so reset it to its default value
4742          * before starting auto-detection.
4743          */
4744         chip->options &= ~NAND_BUSWIDTH_16;
4745
4746         for (; type->name != NULL; type++) {
4747                 if (is_full_id_nand(type)) {
4748                         if (find_full_id_nand(chip, type))
4749                                 goto ident_done;
4750                 } else if (dev_id == type->dev_id) {
4751                         break;
4752                 }
4753         }
4754
4755         if (!type->name || !type->pagesize) {
4756                 /* Check if the chip is ONFI compliant */
4757                 ret = nand_onfi_detect(chip);
4758                 if (ret < 0)
4759                         return ret;
4760                 else if (ret)
4761                         goto ident_done;
4762
4763                 /* Check if the chip is JEDEC compliant */
4764                 ret = nand_jedec_detect(chip);
4765                 if (ret < 0)
4766                         return ret;
4767                 else if (ret)
4768                         goto ident_done;
4769         }
4770
4771         if (!type->name)
4772                 return -ENODEV;
4773
4774         chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4775         if (!chip->parameters.model)
4776                 return -ENOMEM;
4777
4778         chip->chipsize = (uint64_t)type->chipsize << 20;
4779
4780         if (!type->pagesize)
4781                 nand_manufacturer_detect(chip);
4782         else
4783                 nand_decode_id(chip, type);
4784
4785         /* Get chip options */
4786         chip->options |= type->options;
4787
4788 ident_done:
4789         if (!mtd->name)
4790                 mtd->name = chip->parameters.model;
4791
4792         if (chip->options & NAND_BUSWIDTH_AUTO) {
4793                 WARN_ON(busw & NAND_BUSWIDTH_16);
4794                 nand_set_defaults(chip);
4795         } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4796                 /*
4797                  * Check if the bus width is correct. Hardware drivers should
4798                  * set the chip up correctly!
4799                  */
4800                 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4801                         maf_id, dev_id);
4802                 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4803                         mtd->name);
4804                 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4805                         (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4806                 ret = -EINVAL;
4807
4808                 goto free_detect_allocation;
4809         }
4810
4811         nand_decode_bbm_options(chip);
4812
4813         /* Calculate the address shift from the page size */
4814         chip->page_shift = ffs(mtd->writesize) - 1;
4815         /* Convert chipsize to number of pages per chip -1 */
4816         chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4817
4818         chip->bbt_erase_shift = chip->phys_erase_shift =
4819                 ffs(mtd->erasesize) - 1;
4820         if (chip->chipsize & 0xffffffff)
4821                 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4822         else {
4823                 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4824                 chip->chip_shift += 32 - 1;
4825         }
4826
4827         if (chip->chip_shift - chip->page_shift > 16)
4828                 chip->options |= NAND_ROW_ADDR_3;
4829
4830         chip->badblockbits = 8;
4831
4832         nand_legacy_adjust_cmdfunc(chip);
4833
4834         pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4835                 maf_id, dev_id);
4836         pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4837                 chip->parameters.model);
4838         pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4839                 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4840                 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4841         return 0;
4842
4843 free_detect_allocation:
4844         kfree(chip->parameters.model);
4845
4846         return ret;
4847 }
4848
4849 static const char * const nand_ecc_modes[] = {
4850         [NAND_ECC_NONE]         = "none",
4851         [NAND_ECC_SOFT]         = "soft",
4852         [NAND_ECC_HW]           = "hw",
4853         [NAND_ECC_HW_SYNDROME]  = "hw_syndrome",
4854         [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4855         [NAND_ECC_ON_DIE]       = "on-die",
4856 };
4857
4858 static int of_get_nand_ecc_mode(struct device_node *np)
4859 {
4860         const char *pm;
4861         int err, i;
4862
4863         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4864         if (err < 0)
4865                 return err;
4866
4867         for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4868                 if (!strcasecmp(pm, nand_ecc_modes[i]))
4869                         return i;
4870
4871         /*
4872          * For backward compatibility we support a few obsolete values that no
4873          * longer have mappings in nand_ecc_modes_t (they were merged with
4874          * other enums).
4875          */
4876         if (!strcasecmp(pm, "soft_bch"))
4877                 return NAND_ECC_SOFT;
4878
4879         return -ENODEV;
4880 }
4881
4882 static const char * const nand_ecc_algos[] = {
4883         [NAND_ECC_HAMMING]      = "hamming",
4884         [NAND_ECC_BCH]          = "bch",
4885         [NAND_ECC_RS]           = "rs",
4886 };
4887
4888 static int of_get_nand_ecc_algo(struct device_node *np)
4889 {
4890         const char *pm;
4891         int err, i;
4892
4893         err = of_property_read_string(np, "nand-ecc-algo", &pm);
4894         if (!err) {
4895                 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4896                         if (!strcasecmp(pm, nand_ecc_algos[i]))
4897                                 return i;
4898                 return -ENODEV;
4899         }
4900
4901         /*
4902          * For backward compatibility we also read "nand-ecc-mode", checking
4903          * for some obsolete values that used to specify the ECC algorithm.
4904          */
4905         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4906         if (err < 0)
4907                 return err;
4908
4909         if (!strcasecmp(pm, "soft"))
4910                 return NAND_ECC_HAMMING;
4911         else if (!strcasecmp(pm, "soft_bch"))
4912                 return NAND_ECC_BCH;
4913
4914         return -ENODEV;
4915 }
4916
4917 static int of_get_nand_ecc_step_size(struct device_node *np)
4918 {
4919         int ret;
4920         u32 val;
4921
4922         ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4923         return ret ? ret : val;
4924 }
4925
4926 static int of_get_nand_ecc_strength(struct device_node *np)
4927 {
4928         int ret;
4929         u32 val;
4930
4931         ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4932         return ret ? ret : val;
4933 }
4934
4935 static int of_get_nand_bus_width(struct device_node *np)
4936 {
4937         u32 val;
4938
4939         if (of_property_read_u32(np, "nand-bus-width", &val))
4940                 return 8;
4941
4942         switch (val) {
4943         case 8:
4944         case 16:
4945                 return val;
4946         default:
4947                 return -EIO;
4948         }
4949 }
4950
4951 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4952 {
4953         return of_property_read_bool(np, "nand-on-flash-bbt");
4954 }
4955
4956 static int nand_dt_init(struct nand_chip *chip)
4957 {
4958         struct device_node *dn = nand_get_flash_node(chip);
4959         int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4960
4961         if (!dn)
4962                 return 0;
4963
4964         if (of_get_nand_bus_width(dn) == 16)
4965                 chip->options |= NAND_BUSWIDTH_16;
4966
4967         if (of_property_read_bool(dn, "nand-is-boot-medium"))
4968                 chip->options |= NAND_IS_BOOT_MEDIUM;
4969
4970         if (of_get_nand_on_flash_bbt(dn))
4971                 chip->bbt_options |= NAND_BBT_USE_FLASH;
4972
4973         ecc_mode = of_get_nand_ecc_mode(dn);
4974         ecc_algo = of_get_nand_ecc_algo(dn);
4975         ecc_strength = of_get_nand_ecc_strength(dn);
4976         ecc_step = of_get_nand_ecc_step_size(dn);
4977
4978         if (ecc_mode >= 0)
4979                 chip->ecc.mode = ecc_mode;
4980
4981         if (ecc_algo >= 0)
4982                 chip->ecc.algo = ecc_algo;
4983
4984         if (ecc_strength >= 0)
4985                 chip->ecc.strength = ecc_strength;
4986
4987         if (ecc_step > 0)
4988                 chip->ecc.size = ecc_step;
4989
4990         if (of_property_read_bool(dn, "nand-ecc-maximize"))
4991                 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4992
4993         return 0;
4994 }
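
/*
 * A minimal, purely illustrative device tree snippet exercising the
 * properties parsed above could look like:
 *
 *	nand@0 {
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-step-size = <512>;
 *		nand-ecc-strength = <8>;
 *	};
 */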
4995
4996 /**
4997  * nand_scan_ident - Scan for the NAND device
4998  * @chip: NAND chip object
4999  * @maxchips: number of chips to scan for
5000  * @table: alternative NAND ID table
5001  *
5002  * This is the first phase of the normal nand_scan() function. It reads the
5003  * flash ID and sets up MTD fields accordingly.
5004  *
5005  * This helper used to be called directly from controller drivers that needed
5006  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5007  * prevented dynamic allocations during this phase, which was inconvenient and
5008  * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5009  */
5010 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5011                            struct nand_flash_dev *table)
5012 {
5013         struct mtd_info *mtd = nand_to_mtd(chip);
5014         int nand_maf_id, nand_dev_id;
5015         unsigned int i;
5016         int ret;
5017
5018         /* Assume all dies are deselected when we enter nand_scan_ident(). */
5019         chip->cur_cs = -1;
5020
5021         /* Enforce the right timings for reset/detection */
5022         onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5023
5024         ret = nand_dt_init(chip);
5025         if (ret)
5026                 return ret;
5027
5028         if (!mtd->name && mtd->dev.parent)
5029                 mtd->name = dev_name(mtd->dev.parent);
5030
5031         /*
5032          * Start with chip->numchips = maxchips to let nand_select_target() do
5033          * its job. chip->numchips will be adjusted afterwards.
5034          */
5035         chip->numchips = maxchips;
5036
5037         /* Set the default functions */
5038         nand_set_defaults(chip);
5039
5040         ret = nand_legacy_check_hooks(chip);
5041         if (ret)
5042                 return ret;
5043
5044         /* Read the flash type */
5045         ret = nand_detect(chip, table);
5046         if (ret) {
5047                 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5048                         pr_warn("No NAND device found\n");
5049                 nand_deselect_target(chip);
5050                 return ret;
5051         }
5052
5053         nand_maf_id = chip->id.data[0];
5054         nand_dev_id = chip->id.data[1];
5055
5056         nand_deselect_target(chip);
5057
5058         /* Check for a chip array */
5059         for (i = 1; i < maxchips; i++) {
5060                 u8 id[2];
5061
5062                 /* See the reset comment in nand_detect() */
5063                 nand_reset(chip, i);
5064
5065                 nand_select_target(chip, i);
5066                 /* Send the command for reading device ID */
5067                 nand_readid_op(chip, 0, id, sizeof(id));
5068                 /* Read manufacturer and device IDs */
5069                 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5070                         nand_deselect_target(chip);
5071                         break;
5072                 }
5073                 nand_deselect_target(chip);
5074         }
5075         if (i > 1)
5076                 pr_info("%d chips detected\n", i);
5077
5078         /* Store the number of chips and calc total size for mtd */
5079         chip->numchips = i;
5080         mtd->size = i * chip->chipsize;
5081
5082         return 0;
5083 }
5084
5085 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5086 {
5087         kfree(chip->parameters.model);
5088         kfree(chip->parameters.onfi);
5089 }
5090
5091 static int nand_set_ecc_soft_ops(struct nand_chip *chip)
5092 {
5093         struct mtd_info *mtd = nand_to_mtd(chip);
5094         struct nand_ecc_ctrl *ecc = &chip->ecc;
5095
5096         if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
5097                 return -EINVAL;
5098
5099         switch (ecc->algo) {
5100         case NAND_ECC_HAMMING:
5101                 ecc->calculate = nand_calculate_ecc;
5102                 ecc->correct = nand_correct_data;
5103                 ecc->read_page = nand_read_page_swecc;
5104                 ecc->read_subpage = nand_read_subpage;
5105                 ecc->write_page = nand_write_page_swecc;
5106                 ecc->read_page_raw = nand_read_page_raw;
5107                 ecc->write_page_raw = nand_write_page_raw;
5108                 ecc->read_oob = nand_read_oob_std;
5109                 ecc->write_oob = nand_write_oob_std;
5110                 if (!ecc->size)
5111                         ecc->size = 256;
5112                 ecc->bytes = 3;
5113                 ecc->strength = 1;
5114
5115                 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
5116                         ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
5117
5118                 return 0;
5119         case NAND_ECC_BCH:
5120                 if (!mtd_nand_has_bch()) {
5121                         WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
5122                         return -EINVAL;
5123                 }
5124                 ecc->calculate = nand_bch_calculate_ecc;
5125                 ecc->correct = nand_bch_correct_data;
5126                 ecc->read_page = nand_read_page_swecc;
5127                 ecc->read_subpage = nand_read_subpage;
5128                 ecc->write_page = nand_write_page_swecc;
5129                 ecc->read_page_raw = nand_read_page_raw;
5130                 ecc->write_page_raw = nand_write_page_raw;
5131                 ecc->read_oob = nand_read_oob_std;
5132                 ecc->write_oob = nand_write_oob_std;
5133
5134                 /*
5135                  * Board driver should supply ecc.size and ecc.strength
5136                  * values to select how many bits are correctable.
5137                  * Otherwise, default to 4 bits for large page devices.
5138                  */
5139                 if (!ecc->size && (mtd->oobsize >= 64)) {
5140                         ecc->size = 512;
5141                         ecc->strength = 4;
5142                 }
5143
5144                 /*
5145                  * If no ECC placement scheme was provided, pick the default
5146                  * large page one.
5147                  */
5148                 if (!mtd->ooblayout) {
5149                         /* handle large page devices only */
5150                         if (mtd->oobsize < 64) {
5151                                 WARN(1, "OOB layout is required when using software BCH on small pages\n");
5152                                 return -EINVAL;
5153                         }
5154
5155                         mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
5157                 }
5158
5159                 /*
5160                  * We can only maximize ECC config when the default layout is
5161                  * used, otherwise we don't know how many bytes can really be
5162                  * used.
5163                  */
5164                 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
5165                     ecc->options & NAND_ECC_MAXIMIZE) {
5166                         int steps, bytes;
5167
5168                         /* Always prefer 1k blocks over 512-byte ones */
5169                         ecc->size = 1024;
5170                         steps = mtd->writesize / ecc->size;
5171
5172                         /* Reserve 2 bytes for the BBM */
5173                         bytes = (mtd->oobsize - 2) / steps;
5174                         ecc->strength = bytes * 8 / fls(8 * ecc->size);
5175                 }
5176
5177                 /* See nand_bch_init() for details. */
5178                 ecc->bytes = 0;
5179                 ecc->priv = nand_bch_init(mtd);
5180                 if (!ecc->priv) {
5181                         WARN(1, "BCH ECC initialization failed!\n");
5182                         return -EINVAL;
5183                 }
5184                 return 0;
5185         default:
5186                 WARN(1, "Unsupported ECC algorithm!\n");
5187                 return -EINVAL;
5188         }
5189 }
5190
5191 /**
5192  * nand_check_ecc_caps - check the sanity of preset ECC settings
5193  * @chip: nand chip info structure
5194  * @caps: ECC caps info structure
5195  * @oobavail: OOB size that the ECC engine can use
5196  *
5197  * When ECC step size and strength are already set, check if they are supported
5198  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5199  * On success, the calculated ECC bytes is set.
5200  */
5201 static int
5202 nand_check_ecc_caps(struct nand_chip *chip,
5203                     const struct nand_ecc_caps *caps, int oobavail)
5204 {
5205         struct mtd_info *mtd = nand_to_mtd(chip);
5206         const struct nand_ecc_step_info *stepinfo;
5207         int preset_step = chip->ecc.size;
5208         int preset_strength = chip->ecc.strength;
5209         int ecc_bytes, nsteps = mtd->writesize / preset_step;
5210         int i, j;
5211
5212         for (i = 0; i < caps->nstepinfos; i++) {
5213                 stepinfo = &caps->stepinfos[i];
5214
5215                 if (stepinfo->stepsize != preset_step)
5216                         continue;
5217
5218                 for (j = 0; j < stepinfo->nstrengths; j++) {
5219                         if (stepinfo->strengths[j] != preset_strength)
5220                                 continue;
5221
5222                         ecc_bytes = caps->calc_ecc_bytes(preset_step,
5223                                                          preset_strength);
5224                         if (WARN_ON_ONCE(ecc_bytes < 0))
5225                                 return ecc_bytes;
5226
5227                         if (ecc_bytes * nsteps > oobavail) {
5228                                 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5229                                        preset_step, preset_strength);
5230                                 return -ENOSPC;
5231                         }
5232
5233                         chip->ecc.bytes = ecc_bytes;
5234
5235                         return 0;
5236                 }
5237         }
5238
5239         pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5240                preset_step, preset_strength);
5241
5242         return -ENOTSUPP;
5243 }
5244
5245 /**
5246  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5247  * @chip: nand chip info structure
5248  * @caps: ECC engine caps info structure
5249  * @oobavail: OOB size that the ECC engine can use
5250  *
5251  * If a chip's ECC requirement is provided, try to meet it with the least
5252  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5253  * On success, the chosen ECC settings are set.
5254  */
5255 static int
5256 nand_match_ecc_req(struct nand_chip *chip,
5257                    const struct nand_ecc_caps *caps, int oobavail)
5258 {
5259         struct mtd_info *mtd = nand_to_mtd(chip);
5260         const struct nand_ecc_step_info *stepinfo;
5261         int req_step = chip->ecc_step_ds;
5262         int req_strength = chip->ecc_strength_ds;
5263         int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5264         int best_step, best_strength, best_ecc_bytes;
5265         int best_ecc_bytes_total = INT_MAX;
5266         int i, j;
5267
5268         /* No information provided by the NAND chip */
5269         if (!req_step || !req_strength)
5270                 return -ENOTSUPP;
5271
5272         /* number of correctable bits the chip requires in a page */
5273         req_corr = mtd->writesize / req_step * req_strength;
5274
5275         for (i = 0; i < caps->nstepinfos; i++) {
5276                 stepinfo = &caps->stepinfos[i];
5277                 step_size = stepinfo->stepsize;
5278
5279                 for (j = 0; j < stepinfo->nstrengths; j++) {
5280                         strength = stepinfo->strengths[j];
5281
5282                         /*
5283                          * If both step size and strength are smaller than the
5284                          * chip's requirement, it is not easy to compare the
5285                          * resulting reliability.
5286                          */
5287                         if (step_size < req_step && strength < req_strength)
5288                                 continue;
5289
5290                         if (mtd->writesize % step_size)
5291                                 continue;
5292
5293                         nsteps = mtd->writesize / step_size;
5294
5295                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5296                         if (WARN_ON_ONCE(ecc_bytes < 0))
5297                                 continue;
5298                         ecc_bytes_total = ecc_bytes * nsteps;
5299
5300                         if (ecc_bytes_total > oobavail ||
5301                             strength * nsteps < req_corr)
5302                                 continue;
5303
5304                         /*
5305                          * We assume the best is to meet the chip's requirement
5306                          * with the least number of ECC bytes.
5307                          */
5308                         if (ecc_bytes_total < best_ecc_bytes_total) {
5309                                 best_ecc_bytes_total = ecc_bytes_total;
5310                                 best_step = step_size;
5311                                 best_strength = strength;
5312                                 best_ecc_bytes = ecc_bytes;
5313                         }
5314                 }
5315         }
5316
5317         if (best_ecc_bytes_total == INT_MAX)
5318                 return -ENOTSUPP;
5319
5320         chip->ecc.size = best_step;
5321         chip->ecc.strength = best_strength;
5322         chip->ecc.bytes = best_ecc_bytes;
5323
5324         return 0;
5325 }
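
/*
 * Illustration with made-up numbers: if the chip requires 8 bits per
 * 512 bytes on a 4KiB page (req_corr = 64) and the controller offers
 * 512-byte steps at strengths 8 and 16, both satisfy the requirement;
 * assuming calc_ecc_bytes() grows with strength, the strength-8 setting
 * consumes fewer ECC bytes in total and is therefore selected.
 */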
5326
5327 /**
5328  * nand_maximize_ecc - choose the max ECC strength available
5329  * @chip: nand chip info structure
5330  * @caps: ECC engine caps info structure
5331  * @oobavail: OOB size that the ECC engine can use
5332  *
5333  * Choose the max ECC strength that is supported on the controller, and can fit
5334  * within the chip's OOB.  On success, the chosen ECC settings are set.
5335  */
5336 static int
5337 nand_maximize_ecc(struct nand_chip *chip,
5338                   const struct nand_ecc_caps *caps, int oobavail)
5339 {
5340         struct mtd_info *mtd = nand_to_mtd(chip);
5341         const struct nand_ecc_step_info *stepinfo;
5342         int step_size, strength, nsteps, ecc_bytes, corr;
5343         int best_corr = 0;
5344         int best_step = 0;
5345         int best_strength, best_ecc_bytes;
5346         int i, j;
5347
5348         for (i = 0; i < caps->nstepinfos; i++) {
5349                 stepinfo = &caps->stepinfos[i];
5350                 step_size = stepinfo->stepsize;
5351
5352                 /* If chip->ecc.size is already set, respect it */
5353                 if (chip->ecc.size && step_size != chip->ecc.size)
5354                         continue;
5355
5356                 for (j = 0; j < stepinfo->nstrengths; j++) {
5357                         strength = stepinfo->strengths[j];
5358
5359                         if (mtd->writesize % step_size)
5360                                 continue;
5361
5362                         nsteps = mtd->writesize / step_size;
5363
5364                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5365                         if (WARN_ON_ONCE(ecc_bytes < 0))
5366                                 continue;
5367
5368                         if (ecc_bytes * nsteps > oobavail)
5369                                 continue;
5370
5371                         corr = strength * nsteps;
5372
5373                         /*
5374                          * If the number of correctable bits is the same,
5375                          * bigger step_size has more reliability.
5376                          */
5377                         if (corr > best_corr ||
5378                             (corr == best_corr && step_size > best_step)) {
5379                                 best_corr = corr;
5380                                 best_step = step_size;
5381                                 best_strength = strength;
5382                                 best_ecc_bytes = ecc_bytes;
5383                         }
5384                 }
5385         }
5386
5387         if (!best_corr)
5388                 return -ENOTSUPP;
5389
5390         chip->ecc.size = best_step;
5391         chip->ecc.strength = best_strength;
5392         chip->ecc.bytes = best_ecc_bytes;
5393
5394         return 0;
5395 }
5396
5397 /**
5398  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5399  * @chip: nand chip info structure
5400  * @caps: ECC engine caps info structure
5401  * @oobavail: OOB size that the ECC engine can use
5402  *
5403  * Choose the ECC configuration according to following logic
5404  *
5405  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5406  *    then check if it is supported by this controller.
5407  * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5408  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5409  *    to the chip's requirement. If available OOB size can't fit the chip
5410  *    requirement then fallback to the maximum ECC step size and ECC strength.
5411  *
5412  * On success, the chosen ECC settings are set.
5413  */
5414 int nand_ecc_choose_conf(struct nand_chip *chip,
5415                          const struct nand_ecc_caps *caps, int oobavail)
5416 {
5417         struct mtd_info *mtd = nand_to_mtd(chip);
5418
5419         if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5420                 return -EINVAL;
5421
5422         if (chip->ecc.size && chip->ecc.strength)
5423                 return nand_check_ecc_caps(chip, caps, oobavail);
5424
5425         if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5426                 return nand_maximize_ecc(chip, caps, oobavail);
5427
5428         if (!nand_match_ecc_req(chip, caps, oobavail))
5429                 return 0;
5430
5431         return nand_maximize_ecc(chip, caps, oobavail);
5432 }
5433 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
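
/*
 * Sketch of how a controller driver might describe its ECC engine and let
 * the core pick a configuration (all names below are hypothetical):
 *
 *	static const int foo_strengths[] = { 4, 8, 16 };
 *	static const struct nand_ecc_step_info foo_stepinfo = {
 *		.stepsize = 512,
 *		.strengths = foo_strengths,
 *		.nstrengths = ARRAY_SIZE(foo_strengths),
 *	};
 *	static const struct nand_ecc_caps foo_caps = {
 *		.stepinfos = &foo_stepinfo,
 *		.nstepinfos = 1,
 *		.calc_ecc_bytes = foo_calc_ecc_bytes,
 *	};
 *	...
 *	ret = nand_ecc_choose_conf(chip, &foo_caps, mtd->oobsize - 2);
 */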
5434
5435 /*
5436  * Check if the chip configuration meets the datasheet requirements.
5437  *
5438  * If our configuration corrects A bits per B bytes and the minimum
5439  * required correction level is X bits per Y bytes, then we must ensure
5440  * both of the following are true:
5441  *
5442  * (1) A / B >= X / Y
5443  * (2) A >= X
5444  *
5445  * Requirement (1) ensures we can correct for the required bitflip density.
5446  * Requirement (2) ensures we can correct even when all bitflips are clumped
5447  * in the same sector.
5448  */
5449 static bool nand_ecc_strength_good(struct nand_chip *chip)
5450 {
5451         struct mtd_info *mtd = nand_to_mtd(chip);
5452         struct nand_ecc_ctrl *ecc = &chip->ecc;
5453         int corr, ds_corr;
5454
5455         if (ecc->size == 0 || chip->ecc_step_ds == 0)
5456                 /* Not enough information */
5457                 return true;
5458
5459         /*
5460          * We get the number of corrected bits per page to compare
5461          * the correction density.
5462          */
5463         corr = (mtd->writesize * ecc->strength) / ecc->size;
5464         ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
5465
5466         return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
5467 }
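
/*
 * Example with round numbers: on a 2048-byte page, a configuration that
 * corrects 4 bits per 512-byte step gives corr = 16 bits per page. If the
 * datasheet asks for 8 bits per 512 bytes (ds_corr = 32), the check above
 * fails and the "ECC too weak" warning is emitted at scan time.
 */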
5468
5469 /**
5470  * nand_scan_tail - Scan for the NAND device
5471  * @chip: NAND chip object
5472  *
5473  * This is the second phase of the normal nand_scan() function. It fills out
5474  * all the uninitialized function pointers with the defaults and scans for a
5475  * bad block table if appropriate.
5476  */
5477 static int nand_scan_tail(struct nand_chip *chip)
5478 {
5479         struct mtd_info *mtd = nand_to_mtd(chip);
5480         struct nand_ecc_ctrl *ecc = &chip->ecc;
5481         int ret, i;
5482
5483         /* New bad blocks should be marked in OOB, flash-based BBT, or both */
5484         if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5485                    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5486                 return -EINVAL;
5487         }
5488
5489         chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5490         if (!chip->data_buf)
5491                 return -ENOMEM;
5492
5493         /*
5494          * FIXME: some NAND manufacturer drivers expect the first die to be
5495          * selected when manufacturer->init() is called. They should be fixed
5496          * to explicitly select the relevant die when interacting with the NAND
5497          * chip.
5498          */
5499         nand_select_target(chip, 0);
5500         ret = nand_manufacturer_init(chip);
5501         nand_deselect_target(chip);
5502         if (ret)
5503                 goto err_free_buf;
5504
5505         /* Set the internal oob buffer location, just after the page data */
5506         chip->oob_poi = chip->data_buf + mtd->writesize;
5507
5508         /*
5509          * If no default placement scheme is given, select an appropriate one.
5510          */
5511         if (!mtd->ooblayout &&
5512             !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
5513                 switch (mtd->oobsize) {
5514                 case 8:
5515                 case 16:
5516                         mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
5517                         break;
5518                 case 64:
5519                 case 128:
5520                         mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
5521                         break;
5522                 default:
5523                         /*
5524                          * Expose the whole OOB area to users if ECC_NONE
5525                          * is passed. We could do that for all kind of
5526                          * ->oobsize, but we must keep the old large/small
5527                          * page with ECC layout when ->oobsize <= 128 for
5528                          * compatibility reasons.
5529                          */
5530                         if (ecc->mode == NAND_ECC_NONE) {
5531                                 mtd_set_ooblayout(mtd,
5532                                                 &nand_ooblayout_lp_ops);
5533                                 break;
5534                         }
5535
5536                         WARN(1, "No oob scheme defined for oobsize %d\n",
5537                                 mtd->oobsize);
5538                         ret = -EINVAL;
5539                         goto err_nand_manuf_cleanup;
5540                 }
5541         }
5542
5543         /*
5544          * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected but
5545          * the page size is only 256 bytes, fall back to software ECC.
5546          */
5547
5548         switch (ecc->mode) {
5549         case NAND_ECC_HW_OOB_FIRST:
5550                 /* Similar to NAND_ECC_HW, but a separate read_page handle */
5551                 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5552                         WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5553                         ret = -EINVAL;
5554                         goto err_nand_manuf_cleanup;
5555                 }
5556                 if (!ecc->read_page)
5557                         ecc->read_page = nand_read_page_hwecc_oob_first;
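		/* fall through - shares the generic NAND_ECC_HW setup below */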
5558
5559         case NAND_ECC_HW:
5560                 /* Use standard hwecc read page function? */
5561                 if (!ecc->read_page)
5562                         ecc->read_page = nand_read_page_hwecc;
5563                 if (!ecc->write_page)
5564                         ecc->write_page = nand_write_page_hwecc;
5565                 if (!ecc->read_page_raw)
5566                         ecc->read_page_raw = nand_read_page_raw;
5567                 if (!ecc->write_page_raw)
5568                         ecc->write_page_raw = nand_write_page_raw;
5569                 if (!ecc->read_oob)
5570                         ecc->read_oob = nand_read_oob_std;
5571                 if (!ecc->write_oob)
5572                         ecc->write_oob = nand_write_oob_std;
5573                 if (!ecc->read_subpage)
5574                         ecc->read_subpage = nand_read_subpage;
5575                 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5576                         ecc->write_subpage = nand_write_subpage_hwecc;
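		/* fall through - ECC sanity checks are shared with the syndrome case */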
5577
5578         case NAND_ECC_HW_SYNDROME:
5579                 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5580                     (!ecc->read_page ||
5581                      ecc->read_page == nand_read_page_hwecc ||
5582                      !ecc->write_page ||
5583                      ecc->write_page == nand_write_page_hwecc)) {
5584                         WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5585                         ret = -EINVAL;
5586                         goto err_nand_manuf_cleanup;
5587                 }
5588                 /* Use standard syndrome read/write page function? */
5589                 if (!ecc->read_page)
5590                         ecc->read_page = nand_read_page_syndrome;
5591                 if (!ecc->write_page)
5592                         ecc->write_page = nand_write_page_syndrome;
5593                 if (!ecc->read_page_raw)
5594                         ecc->read_page_raw = nand_read_page_raw_syndrome;
5595                 if (!ecc->write_page_raw)
5596                         ecc->write_page_raw = nand_write_page_raw_syndrome;
5597                 if (!ecc->read_oob)
5598                         ecc->read_oob = nand_read_oob_syndrome;
5599                 if (!ecc->write_oob)
5600                         ecc->write_oob = nand_write_oob_syndrome;
5601
5602                 if (mtd->writesize >= ecc->size) {
5603                         if (!ecc->strength) {
5604                                 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5605                                 ret = -EINVAL;
5606                                 goto err_nand_manuf_cleanup;
5607                         }
5608                         break;
5609                 }
5610                 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5611                         ecc->size, mtd->writesize);
5612                 ecc->mode = NAND_ECC_SOFT;
5613                 ecc->algo = NAND_ECC_HAMMING;
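		/* fall through - set up the software ECC helpers just selected */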
5614
5615         case NAND_ECC_SOFT:
5616                 ret = nand_set_ecc_soft_ops(chip);
5617                 if (ret) {
5618                         ret = -EINVAL;
5619                         goto err_nand_manuf_cleanup;
5620                 }
5621                 break;
5622
5623         case NAND_ECC_ON_DIE:
5624                 if (!ecc->read_page || !ecc->write_page) {
5625                         WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5626                         ret = -EINVAL;
5627                         goto err_nand_manuf_cleanup;
5628                 }
5629                 if (!ecc->read_oob)
5630                         ecc->read_oob = nand_read_oob_std;
5631                 if (!ecc->write_oob)
5632                         ecc->write_oob = nand_write_oob_std;
5633                 break;
5634
5635         case NAND_ECC_NONE:
5636                 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5637                 ecc->read_page = nand_read_page_raw;
5638                 ecc->write_page = nand_write_page_raw;
5639                 ecc->read_oob = nand_read_oob_std;
5640                 ecc->read_page_raw = nand_read_page_raw;
5641                 ecc->write_page_raw = nand_write_page_raw;
5642                 ecc->write_oob = nand_write_oob_std;
5643                 ecc->size = mtd->writesize;
5644                 ecc->bytes = 0;
5645                 ecc->strength = 0;
5646                 break;
5647
5648         default:
5649                 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5650                 ret = -EINVAL;
5651                 goto err_nand_manuf_cleanup;
5652         }
5653
5654         if (ecc->correct || ecc->calculate) {
5655                 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5656                 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5657                 if (!ecc->calc_buf || !ecc->code_buf) {
5658                         ret = -ENOMEM;
5659                         goto err_nand_manuf_cleanup;
5660                 }
5661         }
5662
5663         /* For many systems, the standard OOB write also works for raw */
5664         if (!ecc->read_oob_raw)
5665                 ecc->read_oob_raw = ecc->read_oob;
5666         if (!ecc->write_oob_raw)
5667                 ecc->write_oob_raw = ecc->write_oob;
5668
5669         /* propagate ecc info to mtd_info */
5670         mtd->ecc_strength = ecc->strength;
5671         mtd->ecc_step_size = ecc->size;
5672
5673         /*
5674          * Set the number of read / write steps for one page depending on ECC
5675          * mode.
5676          */
5677         ecc->steps = mtd->writesize / ecc->size;
5678         if (ecc->steps * ecc->size != mtd->writesize) {
5679                 WARN(1, "Invalid ECC parameters\n");
5680                 ret = -EINVAL;
5681                 goto err_nand_manuf_cleanup;
5682         }
5683         ecc->total = ecc->steps * ecc->bytes;
5684         if (ecc->total > mtd->oobsize) {
5685                 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5686                 ret = -EINVAL;
5687                 goto err_nand_manuf_cleanup;
5688         }
5689
5690         /*
5691          * The number of bytes available for a client to place data into
5692          * the out of band area.
5693          */
5694         ret = mtd_ooblayout_count_freebytes(mtd);
5695         if (ret < 0)
5696                 ret = 0;
5697
5698         mtd->oobavail = ret;
5699
5700         /* ECC sanity check: warn if it's too weak */
5701         if (!nand_ecc_strength_good(chip))
5702                 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5703                         mtd->name);
5704
5705         /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5706         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5707                 switch (ecc->steps) {
5708                 case 2:
5709                         mtd->subpage_sft = 1;
5710                         break;
5711                 case 4:
5712                 case 8:
5713                 case 16:
5714                         mtd->subpage_sft = 2;
5715                         break;
5716                 }
5717         }
5718         chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
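	/* e.g. on SLC with a 2048-byte page and ecc->steps == 4, subpagesize is 512 */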
5719
5720         /* Initialize state */
5721         chip->state = FL_READY;
5722
5723         /* Invalidate the pagebuffer reference */
5724         chip->pagebuf = -1;
5725
5726         /* Large page NAND with SOFT_ECC should support subpage reads */
5727         switch (ecc->mode) {
5728         case NAND_ECC_SOFT:
5729                 if (chip->page_shift > 9)
5730                         chip->options |= NAND_SUBPAGE_READ;
5731                 break;
5732
5733         default:
5734                 break;
5735         }
5736
5737         /* Fill in remaining MTD driver data */
5738         mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
5739         mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
5740                                                 MTD_CAP_NANDFLASH;
5741         mtd->_erase = nand_erase;
5742         mtd->_point = NULL;
5743         mtd->_unpoint = NULL;
5744         mtd->_panic_write = panic_nand_write;
5745         mtd->_read_oob = nand_read_oob;
5746         mtd->_write_oob = nand_write_oob;
5747         mtd->_sync = nand_sync;
5748         mtd->_lock = NULL;
5749         mtd->_unlock = NULL;
5750         mtd->_suspend = nand_suspend;
5751         mtd->_resume = nand_resume;
5752         mtd->_reboot = nand_shutdown;
5753         mtd->_block_isreserved = nand_block_isreserved;
5754         mtd->_block_isbad = nand_block_isbad;
5755         mtd->_block_markbad = nand_block_markbad;
5756         mtd->_max_bad_blocks = nand_max_bad_blocks;
5757         mtd->writebufsize = mtd->writesize;
5758
5759         /*
5760          * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
5761          * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5762          * properly set.
5763          */
5764         if (!mtd->bitflip_threshold)
5765                 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5766
5767         /* Initialize the ->data_interface field. */
5768         ret = nand_init_data_interface(chip);
5769         if (ret)
5770                 goto err_nand_manuf_cleanup;
5771
5772         /* Enter fastest possible mode on all dies. */
5773         for (i = 0; i < chip->numchips; i++) {
5774                 ret = nand_setup_data_interface(chip, i);
5775                 if (ret)
5776                         goto err_nand_manuf_cleanup;
5777         }
5778
5779         /* Check if we should skip the bad block table scan */
5780         if (chip->options & NAND_SKIP_BBTSCAN)
5781                 return 0;
5782
5783         /* Build bad block table */
5784         ret = nand_create_bbt(chip);
5785         if (ret)
5786                 goto err_nand_manuf_cleanup;
5787
5788         return 0;
5789
5791 err_nand_manuf_cleanup:
5792         nand_manufacturer_cleanup(chip);
5793
5794 err_free_buf:
5795         kfree(chip->data_buf);
5796         kfree(ecc->code_buf);
5797         kfree(ecc->calc_buf);
5798
5799         return ret;
5800 }
5801
5802 static int nand_attach(struct nand_chip *chip)
5803 {
5804         if (chip->controller->ops && chip->controller->ops->attach_chip)
5805                 return chip->controller->ops->attach_chip(chip);
5806
5807         return 0;
5808 }
5809
5810 static void nand_detach(struct nand_chip *chip)
5811 {
5812         if (chip->controller->ops && chip->controller->ops->detach_chip)
5813                 chip->controller->ops->detach_chip(chip);
5814 }
5815
5816 /**
5817  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5818  * @chip: NAND chip object
5819  * @maxchips: number of chips to scan for.
5820  * @ids: optional flash IDs table
5821  *
5822  * This fills out all the uninitialized function pointers with the defaults.
5823  * The flash ID is read and the mtd/chip structures are filled with the
5824  * appropriate values.
5825  */
5826 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5827                        struct nand_flash_dev *ids)
5828 {
5829         int ret;
5830
5831         if (!maxchips)
5832                 return -EINVAL;
5833
5834         ret = nand_scan_ident(chip, maxchips, ids);
5835         if (ret)
5836                 return ret;
5837
5838         ret = nand_attach(chip);
5839         if (ret)
5840                 goto cleanup_ident;
5841
5842         ret = nand_scan_tail(chip);
5843         if (ret)
5844                 goto detach_chip;
5845
5846         return 0;
5847
5848 detach_chip:
5849         nand_detach(chip);
5850 cleanup_ident:
5851         nand_scan_ident_cleanup(chip);
5852
5853         return ret;
5854 }
5855 EXPORT_SYMBOL(nand_scan_with_ids);
5856
5857 /**
5858  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5859  * @chip: NAND chip object
5860  */
5861 void nand_cleanup(struct nand_chip *chip)
5862 {
5863         if (chip->ecc.mode == NAND_ECC_SOFT &&
5864             chip->ecc.algo == NAND_ECC_BCH)
5865                 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5866
5867         /* Free bad block table memory */
5868         kfree(chip->bbt);
5869         kfree(chip->data_buf);
5870         kfree(chip->ecc.code_buf);
5871         kfree(chip->ecc.calc_buf);
5872
5873         /* Free bad block descriptor memory */
5874         if (chip->badblock_pattern && chip->badblock_pattern->options
5875                         & NAND_BBT_DYNAMICSTRUCT)
5876                 kfree(chip->badblock_pattern);
5877
5878         /* Free manufacturer priv data. */
5879         nand_manufacturer_cleanup(chip);
5880
5881         /* Free controller specific allocations after chip identification */
5882         nand_detach(chip);
5883
5884         /* Free identification phase allocations */
5885         nand_scan_ident_cleanup(chip);
5886 }
5888 EXPORT_SYMBOL_GPL(nand_cleanup);
5889
5890 /**
5891  * nand_release - [NAND Interface] Unregister the MTD device and free resources
5892  *                held by the NAND device
5893  * @chip: NAND chip object
5894  */
5895 void nand_release(struct nand_chip *chip)
5896 {
5897         mtd_device_unregister(nand_to_mtd(chip));
5898         nand_cleanup(chip);
5899 }
5900 EXPORT_SYMBOL_GPL(nand_release);
5901
5902 MODULE_LICENSE("GPL");
5903 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5904 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5905 MODULE_DESCRIPTION("Generic NAND flash driver code");