1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Overview:
4  *   This is the generic MTD driver for NAND flash devices. It should be
5  *   capable of working with almost all NAND chips currently available.
6  *
7  *      Additional technical information is available on
8  *      http://www.linux-mtd.infradead.org/doc/nand.html
9  *
10  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
11  *                2002-2006 Thomas Gleixner (tglx@linutronix.de)
12  *
13  *  Credits:
14  *      David Woodhouse for adding multichip support
15  *
16  *      Aleph One Ltd. and Toby Churchill Ltd. for supporting the
17  *      rework for 2K page size chips
18  *
19  *  TODO:
20  *      Enable cached programming for 2k page size chips
21  *      Check if mtd->ecctype should be set to MTD_ECC_HW
22  *      if we have HW ECC support.
23  *      The BBT is not serialized; this has to be fixed
24  */
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/err.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/mm.h>
35 #include <linux/types.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/nand-ecc-sw-hamming.h>
39 #include <linux/mtd/nand-ecc-sw-bch.h>
40 #include <linux/interrupt.h>
41 #include <linux/bitops.h>
42 #include <linux/io.h>
43 #include <linux/mtd/partitions.h>
44 #include <linux/of.h>
45 #include <linux/gpio/consumer.h>
46
47 #include "internals.h"
48
49 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
50                                        struct mtd_pairing_info *info)
51 {
52         int lastpage = (mtd->erasesize / mtd->writesize) - 1;
53         int dist = 3;
54
55         if (page == lastpage)
56                 dist = 2;
57
58         if (!page || (page & 1)) {
59                 info->group = 0;
60                 info->pair = (page + 1) / 2;
61         } else {
62                 info->group = 1;
63                 info->pair = (page + 1 - dist) / 2;
64         }
65
66         return 0;
67 }
68
69 static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
70                                         const struct mtd_pairing_info *info)
71 {
72         int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
73         int page = info->pair * 2;
74         int dist = 3;
75
76         if (!info->group && !info->pair)
77                 return 0;
78
79         if (info->pair == lastpair && info->group)
80                 dist = 2;
81
82         if (!info->group)
83                 page--;
84         else if (info->pair)
85                 page += dist - 1;
86
87         if (page >= mtd->erasesize / mtd->writesize)
88                 return -EINVAL;
89
90         return page;
91 }
92
93 const struct mtd_pairing_scheme dist3_pairing_scheme = {
94         .ngroups = 2,
95         .get_info = nand_pairing_dist3_get_info,
96         .get_wunit = nand_pairing_dist3_get_wunit,
97 };
98
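/*
 * Illustrative sketch (not part of the driver): a consumer of the pairing
 * scheme above, such as a flash translation layer, could look up the write
 * unit paired with a given one through the generic MTD helpers. The
 * example_* helper is hypothetical; mtd_wunit_to_pairing_info() and
 * mtd_pairing_info_to_wunit() are the regular <linux/mtd/mtd.h> accessors.
 */
static int example_paired_wunit(struct mtd_info *mtd, int wunit)
{
	struct mtd_pairing_info info;
	int ret;

	ret = mtd_wunit_to_pairing_info(mtd, wunit, &info);
	if (ret)
		return ret;

	/* The paired write unit lives in the other group of the same pair. */
	info.group = !info.group;

	return mtd_pairing_info_to_wunit(mtd, &info);
}
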
99 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
100 {
101         int ret = 0;
102
103         /* Start address must align on block boundary */
104         if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
105                 pr_debug("%s: unaligned address\n", __func__);
106                 ret = -EINVAL;
107         }
108
109         /* Length must align on block boundary */
110         if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
111                 pr_debug("%s: length not block aligned\n", __func__);
112                 ret = -EINVAL;
113         }
114
115         return ret;
116 }
117
118 /**
119  * nand_extract_bits - Copy unaligned bits from one buffer to another one
120  * @dst: destination buffer
121  * @dst_off: bit offset at which the writing starts
122  * @src: source buffer
123  * @src_off: bit offset at which the reading starts
124  * @nbits: number of bits to copy from @src to @dst
125  *
126  * Copy bits from one memory region to another (overlap authorized).
127  */
128 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
129                        unsigned int src_off, unsigned int nbits)
130 {
131         unsigned int tmp, n;
132
133         dst += dst_off / 8;
134         dst_off %= 8;
135         src += src_off / 8;
136         src_off %= 8;
137
138         while (nbits) {
139                 n = min3(8 - dst_off, 8 - src_off, nbits);
140
141                 tmp = (*src >> src_off) & GENMASK(n - 1, 0);
142                 *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
143                 *dst |= tmp << dst_off;
144
145                 dst_off += n;
146                 if (dst_off >= 8) {
147                         dst++;
148                         dst_off -= 8;
149                 }
150
151                 src_off += n;
152                 if (src_off >= 8) {
153                         src++;
154                         src_off -= 8;
155                 }
156
157                 nbits -= n;
158         }
159 }
160 EXPORT_SYMBOL_GPL(nand_extract_bits);
161
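/*
 * Illustrative sketch (not part of the driver): pulling a 10-bit wide,
 * byte-unaligned field (starting at bit 3 of a raw OOB buffer) into the low
 * bits of a small destination buffer. Buffer names and sizes are
 * hypothetical.
 */
static void example_extract_unaligned_field(const u8 *raw_oob, u8 *field)
{
	/* Clear the destination first, then copy 10 bits from bit offset 3. */
	field[0] = 0;
	field[1] = 0;
	nand_extract_bits(field, 0, raw_oob, 3, 10);
}
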
162 /**
163  * nand_select_target() - Select a NAND target (A.K.A. die)
164  * @chip: NAND chip object
165  * @cs: the CS line to select. Note that this CS id is always from the chip
166  *      PoV, not the controller one
167  *
168  * Select a NAND target so that further operations executed on @chip go to the
169  * selected NAND target.
170  */
171 void nand_select_target(struct nand_chip *chip, unsigned int cs)
172 {
173         /*
174          * cs should always lie between 0 and nanddev_ntargets(); when that's
175          * not the case, it's a bug and the caller should be fixed.
176          */
177         if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
178                 return;
179
180         chip->cur_cs = cs;
181
182         if (chip->legacy.select_chip)
183                 chip->legacy.select_chip(chip, cs);
184 }
185 EXPORT_SYMBOL_GPL(nand_select_target);
186
187 /**
188  * nand_deselect_target() - Deselect the currently selected target
189  * @chip: NAND chip object
190  *
191  * Deselect the currently selected NAND target. The result of operations
192  * executed on @chip after the target has been deselected is undefined.
193  */
194 void nand_deselect_target(struct nand_chip *chip)
195 {
196         if (chip->legacy.select_chip)
197                 chip->legacy.select_chip(chip, -1);
198
199         chip->cur_cs = -1;
200 }
201 EXPORT_SYMBOL_GPL(nand_deselect_target);
202
203 /**
204  * nand_release_device - [GENERIC] release chip
205  * @chip: NAND chip object
206  *
207  * Release chip lock and wake up anyone waiting on the device.
208  */
209 static void nand_release_device(struct nand_chip *chip)
210 {
211         /* Release the controller and the chip */
212         mutex_unlock(&chip->controller->lock);
213         mutex_unlock(&chip->lock);
214 }
215
216 /**
217  * nand_bbm_get_next_page - Get the next page for bad block markers
218  * @chip: NAND chip object
219  * @page: First page to start checking for bad block marker usage
220  *
221  * Returns an integer that corresponds to the page offset within a block, for
222  * a page that is used to store bad block markers. If no more pages are
223  * available, -EINVAL is returned.
224  */
225 int nand_bbm_get_next_page(struct nand_chip *chip, int page)
226 {
227         struct mtd_info *mtd = nand_to_mtd(chip);
228         int last_page = ((mtd->erasesize - mtd->writesize) >>
229                          chip->page_shift) & chip->pagemask;
230         unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
231                 | NAND_BBM_LASTPAGE;
232
233         if (page == 0 && !(chip->options & bbm_flags))
234                 return 0;
235         if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
236                 return 0;
237         if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
238                 return 1;
239         if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
240                 return last_page;
241
242         return -EINVAL;
243 }
244
245 /**
246  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
247  * @chip: NAND chip object
248  * @ofs: offset from device start
249  *
250  * Check if the block is bad.
251  */
252 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
253 {
254         int first_page, page_offset;
255         int res;
256         u8 bad;
257
258         first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
259         page_offset = nand_bbm_get_next_page(chip, 0);
260
261         while (page_offset >= 0) {
262                 res = chip->ecc.read_oob(chip, first_page + page_offset);
263                 if (res < 0)
264                         return res;
265
266                 bad = chip->oob_poi[chip->badblockpos];
267
268                 if (likely(chip->badblockbits == 8))
269                         res = bad != 0xFF;
270                 else
271                         res = hweight8(bad) < chip->badblockbits;
272                 if (res)
273                         return res;
274
275                 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
276         }
277
278         return 0;
279 }
280
281 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
282 {
283         if (chip->options & NAND_NO_BBM_QUIRK)
284                 return 0;
285
286         if (chip->legacy.block_bad)
287                 return chip->legacy.block_bad(chip, ofs);
288
289         return nand_block_bad(chip, ofs);
290 }
291
292 /**
293  * nand_get_device - [GENERIC] Get chip for selected access
294  * @chip: NAND chip structure
295  *
296  * Lock the device and its controller for exclusive access
297  *
298  * Return: -EBUSY if the chip has been suspended, 0 otherwise
299  */
300 static int nand_get_device(struct nand_chip *chip)
301 {
302         mutex_lock(&chip->lock);
303         if (chip->suspended) {
304                 mutex_unlock(&chip->lock);
305                 return -EBUSY;
306         }
307         mutex_lock(&chip->controller->lock);
308
309         return 0;
310 }
311
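/*
 * Illustrative sketch (not part of the driver): the locking pattern the MTD
 * entry points in this file follow around chip accesses. The example_*
 * helper and the hard-coded die 0 are hypothetical.
 */
static int example_locked_access(struct nand_chip *chip, loff_t ofs)
{
	int ret;

	ret = nand_get_device(chip);
	if (ret)
		return ret;

	nand_select_target(chip, 0);
	ret = nand_isbad_bbm(chip, ofs);
	nand_deselect_target(chip);

	nand_release_device(chip);

	return ret;
}
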
312 /**
313  * nand_check_wp - [GENERIC] check if the chip is write protected
314  * @chip: NAND chip object
315  *
316  * Check if the device is write protected. The function expects that the
317  * device is already selected.
318  */
319 static int nand_check_wp(struct nand_chip *chip)
320 {
321         u8 status;
322         int ret;
323
324         /* Broken xD cards report WP despite being writable */
325         if (chip->options & NAND_BROKEN_XD)
326                 return 0;
327
328         /* Check the WP bit */
329         ret = nand_status_op(chip, &status);
330         if (ret)
331                 return ret;
332
333         return status & NAND_STATUS_WP ? 0 : 1;
334 }
335
336 /**
337  * nand_fill_oob - [INTERN] Transfer client buffer to oob
338  * @chip: NAND chip object
339  * @oob: oob data buffer
340  * @len: oob data write length
341  * @ops: oob ops structure
342  */
343 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
344                               struct mtd_oob_ops *ops)
345 {
346         struct mtd_info *mtd = nand_to_mtd(chip);
347         int ret;
348
349         /*
350          * Initialise to all 0xFF, to avoid the possibility of left over OOB
351          * data from a previous OOB read.
352          */
353         memset(chip->oob_poi, 0xff, mtd->oobsize);
354
355         switch (ops->mode) {
356
357         case MTD_OPS_PLACE_OOB:
358         case MTD_OPS_RAW:
359                 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
360                 return oob + len;
361
362         case MTD_OPS_AUTO_OOB:
363                 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
364                                                   ops->ooboffs, len);
365                 BUG_ON(ret);
366                 return oob + len;
367
368         default:
369                 BUG();
370         }
371         return NULL;
372 }
373
374 /**
375  * nand_do_write_oob - [MTD Interface] NAND write out-of-band
376  * @chip: NAND chip object
377  * @to: offset to write to
378  * @ops: oob operation description structure
379  *
380  * NAND write out-of-band.
381  */
382 static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
383                              struct mtd_oob_ops *ops)
384 {
385         struct mtd_info *mtd = nand_to_mtd(chip);
386         int chipnr, page, status, len, ret;
387
388         pr_debug("%s: to = 0x%08x, len = %i\n",
389                          __func__, (unsigned int)to, (int)ops->ooblen);
390
391         len = mtd_oobavail(mtd, ops);
392
393         /* Do not allow write past end of page */
394         if ((ops->ooboffs + ops->ooblen) > len) {
395                 pr_debug("%s: attempt to write past end of page\n",
396                                 __func__);
397                 return -EINVAL;
398         }
399
400         chipnr = (int)(to >> chip->chip_shift);
401
402         /*
403          * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
404          * of my DiskOnChip 2000 test units) will clear the whole data page too
405          * if we don't do this. I have no clue why, but I seem to have 'fixed'
406          * it in the doc2000 driver in August 1999.  dwmw2.
407          */
408         ret = nand_reset(chip, chipnr);
409         if (ret)
410                 return ret;
411
412         nand_select_target(chip, chipnr);
413
414         /* Shift to get page */
415         page = (int)(to >> chip->page_shift);
416
417         /* Check if it is write protected */
418         if (nand_check_wp(chip)) {
419                 nand_deselect_target(chip);
420                 return -EROFS;
421         }
422
423         /* Invalidate the page cache, if we write to the cached page */
424         if (page == chip->pagecache.page)
425                 chip->pagecache.page = -1;
426
427         nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
428
429         if (ops->mode == MTD_OPS_RAW)
430                 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
431         else
432                 status = chip->ecc.write_oob(chip, page & chip->pagemask);
433
434         nand_deselect_target(chip);
435
436         if (status)
437                 return status;
438
439         ops->oobretlen = ops->ooblen;
440
441         return 0;
442 }
443
444 /**
445  * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
446  * @chip: NAND chip object
447  * @ofs: offset from device start
448  *
449  * This is the default implementation, which can be overridden by a hardware
450  * specific driver. It provides the details for writing a bad block marker to a
451  * block.
452  */
453 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
454 {
455         struct mtd_info *mtd = nand_to_mtd(chip);
456         struct mtd_oob_ops ops;
457         uint8_t buf[2] = { 0, 0 };
458         int ret = 0, res, page_offset;
459
460         memset(&ops, 0, sizeof(ops));
461         ops.oobbuf = buf;
462         ops.ooboffs = chip->badblockpos;
463         if (chip->options & NAND_BUSWIDTH_16) {
464                 ops.ooboffs &= ~0x01;
465                 ops.len = ops.ooblen = 2;
466         } else {
467                 ops.len = ops.ooblen = 1;
468         }
469         ops.mode = MTD_OPS_PLACE_OOB;
470
471         page_offset = nand_bbm_get_next_page(chip, 0);
472
473         while (page_offset >= 0) {
474                 res = nand_do_write_oob(chip,
475                                         ofs + (page_offset * mtd->writesize),
476                                         &ops);
477
478                 if (!ret)
479                         ret = res;
480
481                 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
482         }
483
484         return ret;
485 }
486
487 /**
488  * nand_markbad_bbm - mark a block by updating the BBM
489  * @chip: NAND chip object
490  * @ofs: offset of the block to mark bad
491  */
492 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
493 {
494         if (chip->legacy.block_markbad)
495                 return chip->legacy.block_markbad(chip, ofs);
496
497         return nand_default_block_markbad(chip, ofs);
498 }
499
500 /**
501  * nand_block_markbad_lowlevel - mark a block bad
502  * @chip: NAND chip object
503  * @ofs: offset from device start
504  *
505  * This function performs the generic NAND bad block marking steps (i.e., bad
506  * block table(s) and/or marker(s)). We only allow the hardware driver to
507  * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
508  *
509  * We try operations in the following order:
510  *
511  *  (1) erase the affected block, to allow OOB marker to be written cleanly
512  *  (2) write bad block marker to OOB area of affected block (unless flag
513  *      NAND_BBT_NO_OOB_BBM is present)
514  *  (3) update the BBT
515  *
516  * Note that we retain the first error encountered in (2) or (3), finish the
517  * procedures, and return that error at the end.
518 */
519 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
520 {
521         struct mtd_info *mtd = nand_to_mtd(chip);
522         int res, ret = 0;
523
524         if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
525                 struct erase_info einfo;
526
527                 /* Attempt erase before marking OOB */
528                 memset(&einfo, 0, sizeof(einfo));
529                 einfo.addr = ofs;
530                 einfo.len = 1ULL << chip->phys_erase_shift;
531                 nand_erase_nand(chip, &einfo, 0);
532
533                 /* Write bad block marker to OOB */
534                 ret = nand_get_device(chip);
535                 if (ret)
536                         return ret;
537
538                 ret = nand_markbad_bbm(chip, ofs);
539                 nand_release_device(chip);
540         }
541
542         /* Mark block bad in BBT */
543         if (chip->bbt) {
544                 res = nand_markbad_bbt(chip, ofs);
545                 if (!ret)
546                         ret = res;
547         }
548
549         if (!ret)
550                 mtd->ecc_stats.badblocks++;
551
552         return ret;
553 }
554
555 /**
556  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
557  * @mtd: MTD device structure
558  * @ofs: offset from device start
559  *
560  * Check if the block is marked as reserved.
561  */
562 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
563 {
564         struct nand_chip *chip = mtd_to_nand(mtd);
565
566         if (!chip->bbt)
567                 return 0;
568         /* Return info from the table */
569         return nand_isreserved_bbt(chip, ofs);
570 }
571
572 /**
573  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
574  * @chip: NAND chip object
575  * @ofs: offset from device start
576  * @allowbbt: 1 if it is allowed to access the BBT area
577  *
578  * Check if the block is bad, either by reading the bad block table or
579  * by calling the scan function.
580  */
581 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
582 {
583         /* Return info from the table */
584         if (chip->bbt)
585                 return nand_isbad_bbt(chip, ofs, allowbbt);
586
587         return nand_isbad_bbm(chip, ofs);
588 }
589
590 /**
591  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
592  * @chip: NAND chip structure
593  * @timeout_ms: Timeout in ms
594  *
595  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
596  * If that does not happen within the specified timeout, -ETIMEDOUT is
597  * returned.
598  *
599  * This helper is intended to be used when the controller does not have access
600  * to the NAND R/B pin.
601  *
602  * Be aware that calling this helper from an ->exec_op() implementation means
603  * ->exec_op() must be re-entrant.
604  *
605  * Return 0 if the NAND chip is ready, a negative error otherwise.
606  */
607 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
608 {
609         const struct nand_sdr_timings *timings;
610         u8 status = 0;
611         int ret;
612
613         if (!nand_has_exec_op(chip))
614                 return -ENOTSUPP;
615
616         /* Wait tWB before polling the STATUS reg. */
617         timings = nand_get_sdr_timings(nand_get_interface_config(chip));
618         ndelay(PSEC_TO_NSEC(timings->tWB_max));
619
620         ret = nand_status_op(chip, NULL);
621         if (ret)
622                 return ret;
623
624         /*
625          * +1 below is necessary because if we are now in the last fraction
626          * of a jiffy and msecs_to_jiffies is 1, then we will wait only that
627          * small jiffy fraction - possibly leading to false timeout
628          */
629         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
630         do {
631                 ret = nand_read_data_op(chip, &status, sizeof(status), true,
632                                         false);
633                 if (ret)
634                         break;
635
636                 if (status & NAND_STATUS_READY)
637                         break;
638
639                 /*
640                  * The typical lowest execution time for a tR on most NANDs is 10us;
641                  * use this as the polling delay before doing something smarter (i.e.
642                  * deriving a delay from the timeout value, timeout_ms/ratio).
643                  */
644                 udelay(10);
645         } while (time_before(jiffies, timeout_ms));
646
647         /*
648          * We have to exit READ_STATUS mode in order to read real data on the
649          * bus in case the WAITRDY instruction is preceding a DATA_IN
650          * instruction.
651          */
652         nand_exit_status_op(chip);
653
654         if (ret)
655                 return ret;
656
657         return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
658 };
659 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
660
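/*
 * Illustrative sketch (not part of the driver): a controller ->exec_op()
 * implementation without access to the R/B pin can service a WAITRDY
 * instruction with the helper above. The example_* function is
 * hypothetical; the instruction layout comes from <linux/mtd/rawnand.h>.
 */
static int example_exec_waitrdy_instr(struct nand_chip *chip,
				      const struct nand_op_instr *instr)
{
	if (instr->type != NAND_OP_WAITRDY_INSTR)
		return -EINVAL;

	return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
}
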
661 /**
662  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
663  * @chip: NAND chip structure
664  * @gpiod: GPIO descriptor of R/B pin
665  * @timeout_ms: Timeout in ms
666  *
667  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
668  * within the specified timeout, -ETIMEDOUT is returned.
669  *
670  * This helper is intended to be used when the controller has access to the
671  * NAND R/B pin over GPIO.
672  *
673  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
674  */
675 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
676                       unsigned long timeout_ms)
677 {
678
679         /*
680          * Wait until R/B pin indicates chip is ready or timeout occurs.
681          * +1 below is necessary because if we are now in the last fraction
682          * of a jiffy and msecs_to_jiffies is 1, then we will wait only that
683          * small jiffy fraction - possibly leading to false timeout.
684          */
685         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
686         do {
687                 if (gpiod_get_value_cansleep(gpiod))
688                         return 0;
689
690                 cond_resched();
691         } while (time_before(jiffies, timeout_ms));
692
693         return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
694 };
695 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
696
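/*
 * Illustrative sketch (not part of the driver): a controller driver that
 * wired the chip's R/B line to a GPIO (e.g. obtained at probe time with
 * devm_gpiod_get(dev, "rb", GPIOD_IN)) can use the helper above to wait
 * for readiness. The example_* function and the 400ms timeout are
 * hypothetical.
 */
static int example_wait_rb_gpio(struct nand_chip *chip,
				struct gpio_desc *rb_gpio)
{
	return nand_gpio_waitrdy(chip, rb_gpio, 400);
}
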
697 /**
698  * panic_nand_wait - [GENERIC] wait until the command is done
699  * @chip: NAND chip structure
700  * @timeo: timeout
701  *
702  * Wait for command done. This is a helper function for nand_wait used when
703  * we are in interrupt context, which may happen during a panic while trying
704  * to write an oops through mtdoops.
705  */
706 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
707 {
708         int i;
709         for (i = 0; i < timeo; i++) {
710                 if (chip->legacy.dev_ready) {
711                         if (chip->legacy.dev_ready(chip))
712                                 break;
713                 } else {
714                         int ret;
715                         u8 status;
716
717                         ret = nand_read_data_op(chip, &status, sizeof(status),
718                                                 true, false);
719                         if (ret)
720                                 return;
721
722                         if (status & NAND_STATUS_READY)
723                                 break;
724                 }
725                 mdelay(1);
726         }
727 }
728
729 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
730 {
731         return (chip->parameters.supports_set_get_features &&
732                 test_bit(addr, chip->parameters.get_feature_list));
733 }
734
735 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
736 {
737         return (chip->parameters.supports_set_get_features &&
738                 test_bit(addr, chip->parameters.set_feature_list));
739 }
740
741 /**
742  * nand_reset_interface - Reset data interface and timings
743  * @chip: The NAND chip
744  * @chipnr: Internal die id
745  *
746  * Reset the Data interface and timings to ONFI mode 0.
747  *
748  * Returns 0 for success or negative error code otherwise.
749  */
750 static int nand_reset_interface(struct nand_chip *chip, int chipnr)
751 {
752         const struct nand_controller_ops *ops = chip->controller->ops;
753         int ret;
754
755         if (!nand_controller_can_setup_interface(chip))
756                 return 0;
757
758         /*
759          * The ONFI specification says:
760          * "
761          * To transition from NV-DDR or NV-DDR2 to the SDR data
762          * interface, the host shall use the Reset (FFh) command
763          * using SDR timing mode 0. A device in any timing mode is
764          * required to recognize Reset (FFh) command issued in SDR
765          * timing mode 0.
766          * "
767          *
768          * Configure the data interface in SDR mode and set the
769          * timings to timing mode 0.
770          */
771
772         chip->current_interface_config = nand_get_reset_interface_config();
773         ret = ops->setup_interface(chip, chipnr,
774                                    chip->current_interface_config);
775         if (ret)
776                 pr_err("Failed to configure data interface to SDR timing mode 0\n");
777
778         return ret;
779 }
780
781 /**
782  * nand_setup_interface - Setup the best data interface and timings
783  * @chip: The NAND chip
784  * @chipnr: Internal die id
785  *
786  * Configure what has been reported to be the best data interface and NAND
787  * timings supported by the chip and the driver.
788  *
789  * Returns 0 for success or negative error code otherwise.
790  */
791 static int nand_setup_interface(struct nand_chip *chip, int chipnr)
792 {
793         const struct nand_controller_ops *ops = chip->controller->ops;
794         u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
795         int ret;
796
797         if (!nand_controller_can_setup_interface(chip))
798                 return 0;
799
800         /*
801          * A previous nand_reset_interface() call put both the NAND chip and the
802          * NAND controller in timing mode 0. If the default mode for this chip is
803          * also 0, there is no need to apply the change again. Plus, at probe time,
804          * nand_setup_interface() uses ->set/get_features() which would
805          * fail anyway as the parameter page is not available yet.
806          */
807         if (!chip->best_interface_config)
808                 return 0;
809
810         tmode_param[0] = chip->best_interface_config->timings.mode;
811
812         /* Change the mode on the chip side (if supported by the NAND chip) */
813         if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
814                 nand_select_target(chip, chipnr);
815                 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
816                                         tmode_param);
817                 nand_deselect_target(chip);
818                 if (ret)
819                         return ret;
820         }
821
822         /* Change the mode on the controller side */
823         ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
824         if (ret)
825                 return ret;
826
827         /* Check the mode has been accepted by the chip, if supported */
828         if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
829                 goto update_interface_config;
830
831         memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
832         nand_select_target(chip, chipnr);
833         ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
834                                 tmode_param);
835         nand_deselect_target(chip);
836         if (ret)
837                 goto err_reset_chip;
838
839         if (tmode_param[0] != chip->best_interface_config->timings.mode) {
840                 pr_warn("timing mode %d not acknowledged by the NAND chip\n",
841                         chip->best_interface_config->timings.mode);
842                 goto err_reset_chip;
843         }
844
845 update_interface_config:
846         chip->current_interface_config = chip->best_interface_config;
847
848         return 0;
849
850 err_reset_chip:
851         /*
852          * Fallback to mode 0 if the chip explicitly did not ack the chosen
853          * timing mode.
854          */
855         nand_reset_interface(chip, chipnr);
856         nand_select_target(chip, chipnr);
857         nand_reset_op(chip);
858         nand_deselect_target(chip);
859
860         return ret;
861 }
862
863 /**
864  * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
865  *                                NAND controller and the NAND chip support
866  * @chip: the NAND chip
867  * @iface: the interface configuration (may be updated)
868  * @spec_timings: specific timings, when not fitting the ONFI specification
869  *
870  * If specific timings are provided, use them. Otherwise, retrieve supported
871  * timing modes from ONFI information.
872  */
873 int nand_choose_best_sdr_timings(struct nand_chip *chip,
874                                  struct nand_interface_config *iface,
875                                  struct nand_sdr_timings *spec_timings)
876 {
877         const struct nand_controller_ops *ops = chip->controller->ops;
878         int best_mode = 0, mode, ret;
879
880         iface->type = NAND_SDR_IFACE;
881
882         if (spec_timings) {
883                 iface->timings.sdr = *spec_timings;
884                 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
885
886                 /* Verify the controller supports the requested interface */
887                 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
888                                            iface);
889                 if (!ret) {
890                         chip->best_interface_config = iface;
891                         return ret;
892                 }
893
894                 /* Fallback to slower modes */
895                 best_mode = iface->timings.mode;
896         } else if (chip->parameters.onfi) {
897                 best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
898         }
899
900         for (mode = best_mode; mode >= 0; mode--) {
901                 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
902
903                 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
904                                            iface);
905                 if (!ret)
906                         break;
907         }
908
909         chip->best_interface_config = iface;
910
911         return 0;
912 }
913
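/*
 * Illustrative sketch (not part of the driver): a manufacturer driver's
 * ->choose_interface_config() hook can seed the interface with a given ONFI
 * SDR timing mode (mode 3 is an arbitrary example) and let the helper above
 * negotiate it with the controller, falling back to slower modes if needed.
 * The example_* function is hypothetical.
 */
static int example_choose_interface_config(struct nand_chip *chip,
					   struct nand_interface_config *iface)
{
	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 3);

	return nand_choose_best_sdr_timings(chip, iface, &iface->timings.sdr);
}
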
914 /**
915  * nand_choose_interface_config - find the best data interface and timings
916  * @chip: The NAND chip
917  *
918  * Find the best data interface and NAND timings supported by the chip
919  * and the driver. Optionally, the NAND manufacturer driver may propose its own
920  * set of timings.
921  *
922  * After this function nand_chip->interface_config is initialized with the best
923  * timing mode available.
924  *
925  * Returns 0 for success or negative error code otherwise.
926  */
927 static int nand_choose_interface_config(struct nand_chip *chip)
928 {
929         struct nand_interface_config *iface;
930         int ret;
931
932         if (!nand_controller_can_setup_interface(chip))
933                 return 0;
934
935         iface = kzalloc(sizeof(*iface), GFP_KERNEL);
936         if (!iface)
937                 return -ENOMEM;
938
939         if (chip->ops.choose_interface_config)
940                 ret = chip->ops.choose_interface_config(chip, iface);
941         else
942                 ret = nand_choose_best_sdr_timings(chip, iface, NULL);
943
944         if (ret)
945                 kfree(iface);
946
947         return ret;
948 }
949
950 /**
951  * nand_fill_column_cycles - fill the column cycles of an address
952  * @chip: The NAND chip
953  * @addrs: Array of address cycles to fill
954  * @offset_in_page: The offset in the page
955  *
956  * Fills the first or the first two bytes of the @addrs field depending
957  * on the NAND bus width and the page size.
958  *
959  * Returns the number of cycles needed to encode the column, or a negative
960  * error code in case one of the arguments is invalid.
961  */
962 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
963                                    unsigned int offset_in_page)
964 {
965         struct mtd_info *mtd = nand_to_mtd(chip);
966
967         /* Make sure the offset is less than the actual page size. */
968         if (offset_in_page > mtd->writesize + mtd->oobsize)
969                 return -EINVAL;
970
971         /*
972          * On small page NANDs, there's a dedicated command to access the OOB
973          * area, and the column address is relative to the start of the OOB
974          * area, not the start of the page. Adjust the address accordingly.
975          */
976         if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
977                 offset_in_page -= mtd->writesize;
978
979         /*
980          * The offset in page is expressed in bytes, if the NAND bus is 16-bit
981          * wide, then it must be divided by 2.
982          */
983         if (chip->options & NAND_BUSWIDTH_16) {
984                 if (WARN_ON(offset_in_page % 2))
985                         return -EINVAL;
986
987                 offset_in_page /= 2;
988         }
989
990         addrs[0] = offset_in_page;
991
992         /*
993          * Small page NANDs use 1 cycle for the columns, while large page NANDs
994          * need 2
995          */
996         if (mtd->writesize <= 512)
997                 return 1;
998
999         addrs[1] = offset_in_page >> 8;
1000
1001         return 2;
1002 }
1003
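/*
 * Illustrative sketch (not part of the driver): on a 2KiB large-page chip
 * with an 8-bit bus, asking for column 2064 (16 bytes into the OOB area)
 * fills addrs[0] = 0x10 and addrs[1] = 0x08 and reports 2 address cycles.
 * The example_* helper and its buffer are hypothetical.
 */
static int example_oob_column_cycles(struct nand_chip *chip, u8 *addrs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Column pointing 16 bytes into the OOB area. */
	return nand_fill_column_cycles(chip, addrs, mtd->writesize + 16);
}
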
1004 static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1005                                      unsigned int offset_in_page, void *buf,
1006                                      unsigned int len)
1007 {
1008         const struct nand_sdr_timings *sdr =
1009                 nand_get_sdr_timings(nand_get_interface_config(chip));
1010         struct mtd_info *mtd = nand_to_mtd(chip);
1011         u8 addrs[4];
1012         struct nand_op_instr instrs[] = {
1013                 NAND_OP_CMD(NAND_CMD_READ0, 0),
1014                 NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
1015                 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1016                                  PSEC_TO_NSEC(sdr->tRR_min)),
1017                 NAND_OP_DATA_IN(len, buf, 0),
1018         };
1019         struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1020         int ret;
1021
1022         /* Drop the DATA_IN instruction if len is set to 0. */
1023         if (!len)
1024                 op.ninstrs--;
1025
1026         if (offset_in_page >= mtd->writesize)
1027                 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1028         else if (offset_in_page >= 256 &&
1029                  !(chip->options & NAND_BUSWIDTH_16))
1030                 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1031
1032         ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1033         if (ret < 0)
1034                 return ret;
1035
1036         addrs[1] = page;
1037         addrs[2] = page >> 8;
1038
1039         if (chip->options & NAND_ROW_ADDR_3) {
1040                 addrs[3] = page >> 16;
1041                 instrs[1].ctx.addr.naddrs++;
1042         }
1043
1044         return nand_exec_op(chip, &op);
1045 }
1046
1047 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1048                                      unsigned int offset_in_page, void *buf,
1049                                      unsigned int len)
1050 {
1051         const struct nand_sdr_timings *sdr =
1052                 nand_get_sdr_timings(nand_get_interface_config(chip));
1053         u8 addrs[5];
1054         struct nand_op_instr instrs[] = {
1055                 NAND_OP_CMD(NAND_CMD_READ0, 0),
1056                 NAND_OP_ADDR(4, addrs, 0),
1057                 NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1058                 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1059                                  PSEC_TO_NSEC(sdr->tRR_min)),
1060                 NAND_OP_DATA_IN(len, buf, 0),
1061         };
1062         struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1063         int ret;
1064
1065         /* Drop the DATA_IN instruction if len is set to 0. */
1066         if (!len)
1067                 op.ninstrs--;
1068
1069         ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1070         if (ret < 0)
1071                 return ret;
1072
1073         addrs[2] = page;
1074         addrs[3] = page >> 8;
1075
1076         if (chip->options & NAND_ROW_ADDR_3) {
1077                 addrs[4] = page >> 16;
1078                 instrs[1].ctx.addr.naddrs++;
1079         }
1080
1081         return nand_exec_op(chip, &op);
1082 }
1083
1084 /**
1085  * nand_read_page_op - Do a READ PAGE operation
1086  * @chip: The NAND chip
1087  * @page: page to read
1088  * @offset_in_page: offset within the page
1089  * @buf: buffer used to store the data
1090  * @len: length of the buffer
1091  *
1092  * This function issues a READ PAGE operation.
1093  * This function does not select/unselect the CS line.
1094  *
1095  * Returns 0 on success, a negative error code otherwise.
1096  */
1097 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1098                       unsigned int offset_in_page, void *buf, unsigned int len)
1099 {
1100         struct mtd_info *mtd = nand_to_mtd(chip);
1101
1102         if (len && !buf)
1103                 return -EINVAL;
1104
1105         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1106                 return -EINVAL;
1107
1108         if (nand_has_exec_op(chip)) {
1109                 if (mtd->writesize > 512)
1110                         return nand_lp_exec_read_page_op(chip, page,
1111                                                          offset_in_page, buf,
1112                                                          len);
1113
1114                 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1115                                                  buf, len);
1116         }
1117
1118         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1119         if (len)
1120                 chip->legacy.read_buf(chip, buf, len);
1121
1122         return 0;
1123 }
1124 EXPORT_SYMBOL_GPL(nand_read_page_op);
1125
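/*
 * Illustrative sketch (not part of the driver): reading a full page plus its
 * OOB area from die 0 into a caller-provided buffer. The example_* helper is
 * hypothetical; real callers also take the device lock and handle bad
 * blocks.
 */
static int example_read_raw_page(struct nand_chip *chip, unsigned int page,
				 void *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_select_target(chip, 0);
	ret = nand_read_page_op(chip, page, 0, buf,
				mtd->writesize + mtd->oobsize);
	nand_deselect_target(chip);

	return ret;
}
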
1126 /**
1127  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1128  * @chip: The NAND chip
1129  * @page: parameter page to read
1130  * @buf: buffer used to store the data
1131  * @len: length of the buffer
1132  *
1133  * This function issues a READ PARAMETER PAGE operation.
1134  * This function does not select/unselect the CS line.
1135  *
1136  * Returns 0 on success, a negative error code otherwise.
1137  */
1138 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1139                             unsigned int len)
1140 {
1141         unsigned int i;
1142         u8 *p = buf;
1143
1144         if (len && !buf)
1145                 return -EINVAL;
1146
1147         if (nand_has_exec_op(chip)) {
1148                 const struct nand_sdr_timings *sdr =
1149                         nand_get_sdr_timings(nand_get_interface_config(chip));
1150                 struct nand_op_instr instrs[] = {
1151                         NAND_OP_CMD(NAND_CMD_PARAM, 0),
1152                         NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1153                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1154                                          PSEC_TO_NSEC(sdr->tRR_min)),
1155                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1156                 };
1157                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1158
1159                 /* Drop the DATA_IN instruction if len is set to 0. */
1160                 if (!len)
1161                         op.ninstrs--;
1162
1163                 return nand_exec_op(chip, &op);
1164         }
1165
1166         chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1167         for (i = 0; i < len; i++)
1168                 p[i] = chip->legacy.read_byte(chip);
1169
1170         return 0;
1171 }
1172
1173 /**
1174  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1175  * @chip: The NAND chip
1176  * @offset_in_page: offset within the page
1177  * @buf: buffer used to store the data
1178  * @len: length of the buffer
1179  * @force_8bit: force 8-bit bus access
1180  *
1181  * This function issues a CHANGE READ COLUMN operation.
1182  * This function does not select/unselect the CS line.
1183  *
1184  * Returns 0 on success, a negative error code otherwise.
1185  */
1186 int nand_change_read_column_op(struct nand_chip *chip,
1187                                unsigned int offset_in_page, void *buf,
1188                                unsigned int len, bool force_8bit)
1189 {
1190         struct mtd_info *mtd = nand_to_mtd(chip);
1191
1192         if (len && !buf)
1193                 return -EINVAL;
1194
1195         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1196                 return -EINVAL;
1197
1198         /* Small page NANDs do not support column change. */
1199         if (mtd->writesize <= 512)
1200                 return -ENOTSUPP;
1201
1202         if (nand_has_exec_op(chip)) {
1203                 const struct nand_sdr_timings *sdr =
1204                         nand_get_sdr_timings(nand_get_interface_config(chip));
1205                 u8 addrs[2] = {};
1206                 struct nand_op_instr instrs[] = {
1207                         NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1208                         NAND_OP_ADDR(2, addrs, 0),
1209                         NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1210                                     PSEC_TO_NSEC(sdr->tCCS_min)),
1211                         NAND_OP_DATA_IN(len, buf, 0),
1212                 };
1213                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1214                 int ret;
1215
1216                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1217                 if (ret < 0)
1218                         return ret;
1219
1220                 /* Drop the DATA_IN instruction if len is set to 0. */
1221                 if (!len)
1222                         op.ninstrs--;
1223
1224                 instrs[3].ctx.data.force_8bit = force_8bit;
1225
1226                 return nand_exec_op(chip, &op);
1227         }
1228
1229         chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1230         if (len)
1231                 chip->legacy.read_buf(chip, buf, len);
1232
1233         return 0;
1234 }
1235 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1236
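/*
 * Illustrative sketch (not part of the driver): an ECC ->read_page() style
 * sequence that reads the data area and then jumps to the OOB area without
 * issuing a new READ PAGE command. The example_* helper is hypothetical.
 */
static int example_read_data_then_oob(struct nand_chip *chip,
				      unsigned int page, void *databuf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
	if (ret)
		return ret;

	return nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
					  mtd->oobsize, false);
}
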
1237 /**
1238  * nand_read_oob_op - Do a READ OOB operation
1239  * @chip: The NAND chip
1240  * @page: page to read
1241  * @offset_in_oob: offset within the OOB area
1242  * @buf: buffer used to store the data
1243  * @len: length of the buffer
1244  *
1245  * This function issues a READ OOB operation.
1246  * This function does not select/unselect the CS line.
1247  *
1248  * Returns 0 on success, a negative error code otherwise.
1249  */
1250 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1251                      unsigned int offset_in_oob, void *buf, unsigned int len)
1252 {
1253         struct mtd_info *mtd = nand_to_mtd(chip);
1254
1255         if (len && !buf)
1256                 return -EINVAL;
1257
1258         if (offset_in_oob + len > mtd->oobsize)
1259                 return -EINVAL;
1260
1261         if (nand_has_exec_op(chip))
1262                 return nand_read_page_op(chip, page,
1263                                          mtd->writesize + offset_in_oob,
1264                                          buf, len);
1265
1266         chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1267         if (len)
1268                 chip->legacy.read_buf(chip, buf, len);
1269
1270         return 0;
1271 }
1272 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1273
1274 static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1275                                   unsigned int offset_in_page, const void *buf,
1276                                   unsigned int len, bool prog)
1277 {
1278         const struct nand_sdr_timings *sdr =
1279                 nand_get_sdr_timings(nand_get_interface_config(chip));
1280         struct mtd_info *mtd = nand_to_mtd(chip);
1281         u8 addrs[5] = {};
1282         struct nand_op_instr instrs[] = {
1283                 /*
1284                  * The first instruction will be dropped if we're dealing
1285                  * with a large page NAND and adjusted if we're dealing
1286                  * with a small page NAND and the page offset is > 255.
1287                  */
1288                 NAND_OP_CMD(NAND_CMD_READ0, 0),
1289                 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1290                 NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1291                 NAND_OP_DATA_OUT(len, buf, 0),
1292                 NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1293                 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1294         };
1295         struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1296         int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1297         int ret;
1298         u8 status;
1299
1300         if (naddrs < 0)
1301                 return naddrs;
1302
1303         addrs[naddrs++] = page;
1304         addrs[naddrs++] = page >> 8;
1305         if (chip->options & NAND_ROW_ADDR_3)
1306                 addrs[naddrs++] = page >> 16;
1307
1308         instrs[2].ctx.addr.naddrs = naddrs;
1309
1310         /* Drop the last two instructions if we're not programming the page. */
1311         if (!prog) {
1312                 op.ninstrs -= 2;
1313                 /* Also drop the DATA_OUT instruction if empty. */
1314                 if (!len)
1315                         op.ninstrs--;
1316         }
1317
1318         if (mtd->writesize <= 512) {
1319                 /*
1320                  * Small pages need some more tweaking: we have to adjust the
1321                  * first instruction depending on the page offset we're trying
1322                  * to access.
1323                  */
1324                 if (offset_in_page >= mtd->writesize)
1325                         instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1326                 else if (offset_in_page >= 256 &&
1327                          !(chip->options & NAND_BUSWIDTH_16))
1328                         instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1329         } else {
1330                 /*
1331                  * Drop the first command if we're dealing with a large page
1332                  * NAND.
1333                  */
1334                 op.instrs++;
1335                 op.ninstrs--;
1336         }
1337
1338         ret = nand_exec_op(chip, &op);
1339         if (!prog || ret)
1340                 return ret;
1341
1342         ret = nand_status_op(chip, &status);
1343         if (ret)
1344                 return ret;
1345
1346         return status;
1347 }
1348
1349 /**
1350  * nand_prog_page_begin_op - starts a PROG PAGE operation
1351  * @chip: The NAND chip
1352  * @page: page to write
1353  * @offset_in_page: offset within the page
1354  * @buf: buffer containing the data to write to the page
1355  * @len: length of the buffer
1356  *
1357  * This function issues the first half of a PROG PAGE operation.
1358  * This function does not select/unselect the CS line.
1359  *
1360  * Returns 0 on success, a negative error code otherwise.
1361  */
1362 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1363                             unsigned int offset_in_page, const void *buf,
1364                             unsigned int len)
1365 {
1366         struct mtd_info *mtd = nand_to_mtd(chip);
1367
1368         if (len && !buf)
1369                 return -EINVAL;
1370
1371         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1372                 return -EINVAL;
1373
1374         if (nand_has_exec_op(chip))
1375                 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1376                                               len, false);
1377
1378         chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1379
1380         if (buf)
1381                 chip->legacy.write_buf(chip, buf, len);
1382
1383         return 0;
1384 }
1385 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1386
1387 /**
1388  * nand_prog_page_end_op - ends a PROG PAGE operation
1389  * @chip: The NAND chip
1390  *
1391  * This function issues the second half of a PROG PAGE operation.
1392  * This function does not select/unselect the CS line.
1393  *
1394  * Returns 0 on success, a negative error code otherwise.
1395  */
1396 int nand_prog_page_end_op(struct nand_chip *chip)
1397 {
1398         int ret;
1399         u8 status;
1400
1401         if (nand_has_exec_op(chip)) {
1402                 const struct nand_sdr_timings *sdr =
1403                         nand_get_sdr_timings(nand_get_interface_config(chip));
1404                 struct nand_op_instr instrs[] = {
1405                         NAND_OP_CMD(NAND_CMD_PAGEPROG,
1406                                     PSEC_TO_NSEC(sdr->tWB_max)),
1407                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1408                 };
1409                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1410
1411                 ret = nand_exec_op(chip, &op);
1412                 if (ret)
1413                         return ret;
1414
1415                 ret = nand_status_op(chip, &status);
1416                 if (ret)
1417                         return ret;
1418         } else {
1419                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1420                 ret = chip->legacy.waitfunc(chip);
1421                 if (ret < 0)
1422                         return ret;
1423
1424                 status = ret;
1425         }
1426
1427         if (status & NAND_STATUS_FAIL)
1428                 return -EIO;
1429
1430         return 0;
1431 }
1432 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1433
1434 /**
1435  * nand_prog_page_op - Do a full PROG PAGE operation
1436  * @chip: The NAND chip
1437  * @page: page to write
1438  * @offset_in_page: offset within the page
1439  * @buf: buffer containing the data to write to the page
1440  * @len: length of the buffer
1441  *
1442  * This function issues a full PROG PAGE operation.
1443  * This function does not select/unselect the CS line.
1444  *
1445  * Returns 0 on success, a negative error code otherwise.
1446  */
1447 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1448                       unsigned int offset_in_page, const void *buf,
1449                       unsigned int len)
1450 {
1451         struct mtd_info *mtd = nand_to_mtd(chip);
1452         int status;
1453
1454         if (!len || !buf)
1455                 return -EINVAL;
1456
1457         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1458                 return -EINVAL;
1459
1460         if (nand_has_exec_op(chip)) {
1461                 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1462                                                 len, true);
1463         } else {
1464                 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1465                                      page);
1466                 chip->legacy.write_buf(chip, buf, len);
1467                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1468                 status = chip->legacy.waitfunc(chip);
1469         }
1470
1471         if (status & NAND_STATUS_FAIL)
1472                 return -EIO;
1473
1474         return 0;
1475 }
1476 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1477
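/*
 * Illustrative sketch (not part of the driver): programming one full page of
 * data on die 0. The example_* helper is hypothetical; real callers also
 * take the device lock and check for write protection and bad blocks first.
 */
static int example_write_raw_page(struct nand_chip *chip, unsigned int page,
				  const void *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_select_target(chip, 0);
	ret = nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
	nand_deselect_target(chip);

	return ret;
}
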
1478 /**
1479  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1480  * @chip: The NAND chip
1481  * @offset_in_page: offset within the page
1482  * @buf: buffer containing the data to send to the NAND
1483  * @len: length of the buffer
1484  * @force_8bit: force 8-bit bus access
1485  *
1486  * This function issues a CHANGE WRITE COLUMN operation.
1487  * This function does not select/unselect the CS line.
1488  *
1489  * Returns 0 on success, a negative error code otherwise.
1490  */
1491 int nand_change_write_column_op(struct nand_chip *chip,
1492                                 unsigned int offset_in_page,
1493                                 const void *buf, unsigned int len,
1494                                 bool force_8bit)
1495 {
1496         struct mtd_info *mtd = nand_to_mtd(chip);
1497
1498         if (len && !buf)
1499                 return -EINVAL;
1500
1501         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1502                 return -EINVAL;
1503
1504         /* Small page NANDs do not support column change. */
1505         if (mtd->writesize <= 512)
1506                 return -ENOTSUPP;
1507
1508         if (nand_has_exec_op(chip)) {
1509                 const struct nand_sdr_timings *sdr =
1510                         nand_get_sdr_timings(nand_get_interface_config(chip));
1511                 u8 addrs[2];
1512                 struct nand_op_instr instrs[] = {
1513                         NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1514                         NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1515                         NAND_OP_DATA_OUT(len, buf, 0),
1516                 };
1517                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1518                 int ret;
1519
1520                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1521                 if (ret < 0)
1522                         return ret;
1523
1524                 instrs[2].ctx.data.force_8bit = force_8bit;
1525
1526                 /* Drop the DATA_OUT instruction if len is set to 0. */
1527                 if (!len)
1528                         op.ninstrs--;
1529
1530                 return nand_exec_op(chip, &op);
1531         }
1532
1533         chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1534         if (len)
1535                 chip->legacy.write_buf(chip, buf, len);
1536
1537         return 0;
1538 }
1539 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1540
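/*
 * Illustrative sketch (not part of the driver): an ECC ->write_page() style
 * sequence that loads the data area, moves the write column to the OOB area,
 * loads the OOB bytes and finally triggers the programming. The example_*
 * helper is hypothetical.
 */
static int example_write_data_and_oob(struct nand_chip *chip,
				      unsigned int page, const void *databuf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, databuf, mtd->writesize);
	if (ret)
		return ret;

	ret = nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
					  mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
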
1541 /**
1542  * nand_readid_op - Do a READID operation
1543  * @chip: The NAND chip
1544  * @addr: address cycle to pass after the READID command
1545  * @buf: buffer used to store the ID
1546  * @len: length of the buffer
1547  *
1548  * This function sends a READID command and reads back the ID returned by the
1549  * NAND.
1550  * This function does not select/unselect the CS line.
1551  *
1552  * Returns 0 on success, a negative error code otherwise.
1553  */
1554 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1555                    unsigned int len)
1556 {
1557         unsigned int i;
1558         u8 *id = buf;
1559
1560         if (len && !buf)
1561                 return -EINVAL;
1562
1563         if (nand_has_exec_op(chip)) {
1564                 const struct nand_sdr_timings *sdr =
1565                         nand_get_sdr_timings(nand_get_interface_config(chip));
1566                 struct nand_op_instr instrs[] = {
1567                         NAND_OP_CMD(NAND_CMD_READID, 0),
1568                         NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1569                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1570                 };
1571                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1572
1573                 /* Drop the DATA_IN instruction if len is set to 0. */
1574                 if (!len)
1575                         op.ninstrs--;
1576
1577                 return nand_exec_op(chip, &op);
1578         }
1579
1580         chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1581
1582         for (i = 0; i < len; i++)
1583                 id[i] = chip->legacy.read_byte(chip);
1584
1585         return 0;
1586 }
1587 EXPORT_SYMBOL_GPL(nand_readid_op);
1588
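/*
 * Example (illustrative sketch): reading the first two ID bytes at address
 * 0x00. The select/deselect pairing is the caller's responsibility since
 * this helper does not touch the CS line; the target number 0 is an
 * assumption.
 *
 *        u8 id[2];
 *        int ret;
 *
 *        nand_select_target(chip, 0);
 *        ret = nand_readid_op(chip, 0, id, sizeof(id));
 *        nand_deselect_target(chip);
 *        if (!ret)
 *                pr_debug("manufacturer 0x%02x, device 0x%02x\n", id[0], id[1]);
 */
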
1589 /**
1590  * nand_status_op - Do a STATUS operation
1591  * @chip: The NAND chip
1592  * @status: out variable to store the NAND status
1593  *
1594  * This function sends a STATUS command and reads back the status returned by
1595  * the NAND.
1596  * This function does not select/unselect the CS line.
1597  *
1598  * Returns 0 on success, a negative error code otherwise.
1599  */
1600 int nand_status_op(struct nand_chip *chip, u8 *status)
1601 {
1602         if (nand_has_exec_op(chip)) {
1603                 const struct nand_sdr_timings *sdr =
1604                         nand_get_sdr_timings(nand_get_interface_config(chip));
1605                 struct nand_op_instr instrs[] = {
1606                         NAND_OP_CMD(NAND_CMD_STATUS,
1607                                     PSEC_TO_NSEC(sdr->tADL_min)),
1608                         NAND_OP_8BIT_DATA_IN(1, status, 0),
1609                 };
1610                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1611
1612                 if (!status)
1613                         op.ninstrs--;
1614
1615                 return nand_exec_op(chip, &op);
1616         }
1617
1618         chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1619         if (status)
1620                 *status = chip->legacy.read_byte(chip);
1621
1622         return 0;
1623 }
1624 EXPORT_SYMBOL_GPL(nand_status_op);
1625
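/*
 * Example (illustrative sketch): polling the status after a program/erase
 * and leaving status mode with nand_exit_status_op() (defined below) so
 * that subsequent read cycles return data again.
 *
 *        u8 status;
 *        int ret;
 *
 *        ret = nand_status_op(chip, &status);
 *        if (!ret)
 *                ret = nand_exit_status_op(chip);
 *        if (ret)
 *                return ret;
 *
 *        if (status & NAND_STATUS_FAIL)
 *                return -EIO;
 */
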
1626 /**
1627  * nand_exit_status_op - Exit a STATUS operation
1628  * @chip: The NAND chip
1629  *
1630  * This function sends a READ0 command to cancel the effect of the STATUS
1631  * command, so that the chip stops returning only its status on subsequent reads.
1632  *
1633  * This function does not select/unselect the CS line.
1634  *
1635  * Returns 0 on success, a negative error code otherwise.
1636  */
1637 int nand_exit_status_op(struct nand_chip *chip)
1638 {
1639         if (nand_has_exec_op(chip)) {
1640                 struct nand_op_instr instrs[] = {
1641                         NAND_OP_CMD(NAND_CMD_READ0, 0),
1642                 };
1643                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1644
1645                 return nand_exec_op(chip, &op);
1646         }
1647
1648         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1649
1650         return 0;
1651 }
1652
1653 /**
1654  * nand_erase_op - Do an erase operation
1655  * @chip: The NAND chip
1656  * @eraseblock: block to erase
1657  *
1658  * This function sends an ERASE command and waits for the NAND to be ready
1659  * before returning.
1660  * This function does not select/unselect the CS line.
1661  *
1662  * Returns 0 on success, a negative error code otherwise.
1663  */
1664 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1665 {
1666         unsigned int page = eraseblock <<
1667                             (chip->phys_erase_shift - chip->page_shift);
1668         int ret;
1669         u8 status;
1670
1671         if (nand_has_exec_op(chip)) {
1672                 const struct nand_sdr_timings *sdr =
1673                         nand_get_sdr_timings(nand_get_interface_config(chip));
1674                 u8 addrs[3] = { page, page >> 8, page >> 16 };
1675                 struct nand_op_instr instrs[] = {
1676                         NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1677                         NAND_OP_ADDR(2, addrs, 0),
1678                         NAND_OP_CMD(NAND_CMD_ERASE2,
1679                                     PSEC_TO_MSEC(sdr->tWB_max)),
1680                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1681                 };
1682                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1683
1684                 if (chip->options & NAND_ROW_ADDR_3)
1685                         instrs[1].ctx.addr.naddrs++;
1686
1687                 ret = nand_exec_op(chip, &op);
1688                 if (ret)
1689                         return ret;
1690
1691                 ret = nand_status_op(chip, &status);
1692                 if (ret)
1693                         return ret;
1694         } else {
1695                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1696                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1697
1698                 ret = chip->legacy.waitfunc(chip);
1699                 if (ret < 0)
1700                         return ret;
1701
1702                 status = ret;
1703         }
1704
1705         if (status & NAND_STATUS_FAIL)
1706                 return -EIO;
1707
1708         return 0;
1709 }
1710 EXPORT_SYMBOL_GPL(nand_erase_op);
1711
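/*
 * Example (illustrative sketch): erasing the block that contains a given
 * page, using the inverse of the page computation done above. 'page' is a
 * hypothetical caller-provided variable.
 *
 *        unsigned int eraseblock = page >>
 *                                  (chip->phys_erase_shift - chip->page_shift);
 *        int ret;
 *
 *        ret = nand_erase_op(chip, eraseblock);
 *        if (ret == -EIO)
 *                pr_debug("erase of block %u failed\n", eraseblock);
 */
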
1712 /**
1713  * nand_set_features_op - Do a SET FEATURES operation
1714  * @chip: The NAND chip
1715  * @feature: feature id
1716  * @data: 4 bytes of data
1717  *
1718  * This function sends a SET FEATURES command and waits for the NAND to be
1719  * ready before returning.
1720  * This function does not select/unselect the CS line.
1721  *
1722  * Returns 0 on success, a negative error code otherwise.
1723  */
1724 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1725                                 const void *data)
1726 {
1727         const u8 *params = data;
1728         int i, ret;
1729
1730         if (nand_has_exec_op(chip)) {
1731                 const struct nand_sdr_timings *sdr =
1732                         nand_get_sdr_timings(nand_get_interface_config(chip));
1733                 struct nand_op_instr instrs[] = {
1734                         NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1735                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1736                         NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1737                                               PSEC_TO_NSEC(sdr->tWB_max)),
1738                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1739                 };
1740                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1741
1742                 return nand_exec_op(chip, &op);
1743         }
1744
1745         chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1746         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1747                 chip->legacy.write_byte(chip, params[i]);
1748
1749         ret = chip->legacy.waitfunc(chip);
1750         if (ret < 0)
1751                 return ret;
1752
1753         if (ret & NAND_STATUS_FAIL)
1754                 return -EIO;
1755
1756         return 0;
1757 }
1758
1759 /**
1760  * nand_get_features_op - Do a GET FEATURES operation
1761  * @chip: The NAND chip
1762  * @feature: feature id
1763  * @data: 4 bytes of data
1764  *
1765  * This function sends a GET FEATURES command and waits for the NAND to be
1766  * ready before returning.
1767  * This function does not select/unselect the CS line.
1768  *
1769  * Returns 0 on success, a negative error code otherwise.
1770  */
1771 static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1772                                 void *data)
1773 {
1774         u8 *params = data;
1775         int i;
1776
1777         if (nand_has_exec_op(chip)) {
1778                 const struct nand_sdr_timings *sdr =
1779                         nand_get_sdr_timings(nand_get_interface_config(chip));
1780                 struct nand_op_instr instrs[] = {
1781                         NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1782                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
1783                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
1784                                          PSEC_TO_NSEC(sdr->tRR_min)),
1785                         NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1786                                              data, 0),
1787                 };
1788                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1789
1790                 return nand_exec_op(chip, &op);
1791         }
1792
1793         chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1794         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1795                 params[i] = chip->legacy.read_byte(chip);
1796
1797         return 0;
1798 }
1799
1800 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1801                             unsigned int delay_ns)
1802 {
1803         if (nand_has_exec_op(chip)) {
1804                 struct nand_op_instr instrs[] = {
1805                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1806                                          PSEC_TO_NSEC(delay_ns)),
1807                 };
1808                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1809
1810                 return nand_exec_op(chip, &op);
1811         }
1812
1813         /* Apply delay or wait for ready/busy pin */
1814         if (!chip->legacy.dev_ready)
1815                 udelay(chip->legacy.chip_delay);
1816         else
1817                 nand_wait_ready(chip);
1818
1819         return 0;
1820 }
1821
1822 /**
1823  * nand_reset_op - Do a reset operation
1824  * @chip: The NAND chip
1825  *
1826  * This function sends a RESET command and waits for the NAND to be ready
1827  * before returning.
1828  * This function does not select/unselect the CS line.
1829  *
1830  * Returns 0 on success, a negative error code otherwise.
1831  */
1832 int nand_reset_op(struct nand_chip *chip)
1833 {
1834         if (nand_has_exec_op(chip)) {
1835                 const struct nand_sdr_timings *sdr =
1836                         nand_get_sdr_timings(nand_get_interface_config(chip));
1837                 struct nand_op_instr instrs[] = {
1838                         NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
1839                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
1840                 };
1841                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1842
1843                 return nand_exec_op(chip, &op);
1844         }
1845
1846         chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
1847
1848         return 0;
1849 }
1850 EXPORT_SYMBOL_GPL(nand_reset_op);
1851
1852 /**
1853  * nand_read_data_op - Read data from the NAND
1854  * @chip: The NAND chip
1855  * @buf: buffer used to store the data
1856  * @len: length of the buffer
1857  * @force_8bit: force 8-bit bus access
1858  * @check_only: do not actually run the command, only check whether the
1859  *              controller driver supports it
1860  *
1861  * This function does a raw data read on the bus. Usually used after launching
1862  * another NAND operation like nand_read_page_op().
1863  * This function does not select/unselect the CS line.
1864  *
1865  * Returns 0 on success, a negative error code otherwise.
1866  */
1867 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1868                       bool force_8bit, bool check_only)
1869 {
1870         if (!len || !buf)
1871                 return -EINVAL;
1872
1873         if (nand_has_exec_op(chip)) {
1874                 struct nand_op_instr instrs[] = {
1875                         NAND_OP_DATA_IN(len, buf, 0),
1876                 };
1877                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1878
1879                 instrs[0].ctx.data.force_8bit = force_8bit;
1880
1881                 if (check_only)
1882                         return nand_check_op(chip, &op);
1883
1884                 return nand_exec_op(chip, &op);
1885         }
1886
1887         if (check_only)
1888                 return 0;
1889
1890         if (force_8bit) {
1891                 u8 *p = buf;
1892                 unsigned int i;
1893
1894                 for (i = 0; i < len; i++)
1895                         p[i] = chip->legacy.read_byte(chip);
1896         } else {
1897                 chip->legacy.read_buf(chip, buf, len);
1898         }
1899
1900         return 0;
1901 }
1902 EXPORT_SYMBOL_GPL(nand_read_data_op);
1903
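/*
 * Example (illustrative sketch): fetching the OOB area once a page read has
 * already been started, which is exactly how nand_read_page_raw() uses this
 * helper further down in this file. 'databuf' and 'page' are hypothetical.
 *
 *        ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
 *        if (!ret)
 *                ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
 *                                        false, false);
 */
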
1904  * nand_write_data_op - Write data to the NAND
1905  * nand_write_data_op - Write data from the NAND
1906  * @chip: The NAND chip
1907  * @buf: buffer containing the data to send on the bus
1908  * @len: length of the buffer
1909  * @force_8bit: force 8-bit bus access
1910  *
1911  * This function does a raw data write on the bus. Usually used after launching
1912  * another NAND operation like nand_prog_page_begin_op().
1913  * This function does not select/unselect the CS line.
1914  *
1915  * Returns 0 on success, a negative error code otherwise.
1916  */
1917 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1918                        unsigned int len, bool force_8bit)
1919 {
1920         if (!len || !buf)
1921                 return -EINVAL;
1922
1923         if (nand_has_exec_op(chip)) {
1924                 struct nand_op_instr instrs[] = {
1925                         NAND_OP_DATA_OUT(len, buf, 0),
1926                 };
1927                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1928
1929                 instrs[0].ctx.data.force_8bit = force_8bit;
1930
1931                 return nand_exec_op(chip, &op);
1932         }
1933
1934         if (force_8bit) {
1935                 const u8 *p = buf;
1936                 unsigned int i;
1937
1938                 for (i = 0; i < len; i++)
1939                         chip->legacy.write_byte(chip, p[i]);
1940         } else {
1941                 chip->legacy.write_buf(chip, buf, len);
1942         }
1943
1944         return 0;
1945 }
1946 EXPORT_SYMBOL_GPL(nand_write_data_op);
1947
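/*
 * Example (illustrative sketch): a typical write sequence splits page
 * programming into begin/data/end steps so the caller can interleave its
 * own data and OOB transfers. 'databuf' and 'page' are hypothetical.
 *
 *        ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 *        if (!ret)
 *                ret = nand_write_data_op(chip, databuf, mtd->writesize, false);
 *        if (!ret)
 *                ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
 *                                         false);
 *        if (!ret)
 *                ret = nand_prog_page_end_op(chip);
 */
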
1948 /**
1949  * struct nand_op_parser_ctx - Context used by the parser
1950  * @instrs: array of all the instructions that must be addressed
1951  * @ninstrs: length of the @instrs array
1952  * @subop: Sub-operation to be passed to the NAND controller
1953  *
1954  * This structure is used by the core to split NAND operations into
1955  * sub-operations that can be handled by the NAND controller.
1956  */
1957 struct nand_op_parser_ctx {
1958         const struct nand_op_instr *instrs;
1959         unsigned int ninstrs;
1960         struct nand_subop subop;
1961 };
1962
1963 /**
1964  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1965  * @pat: the parser pattern element that matches @instr
1966  * @instr: pointer to the instruction to check
1967  * @start_offset: this is an in/out parameter. If @instr has already been
1968  *                split, then @start_offset is the offset from which to start
1969  *                (either an address cycle or an offset in the data buffer).
1970  *                Conversely, if the function returns true (i.e. @instr must be
1971  *                split), this parameter is updated to point to the first
1972  *                data/address cycle that has not been taken care of.
1973  *
1974  * Some NAND controllers are limited and cannot send X address cycles in a
1975  * single operation, or cannot read/write more than Y bytes at the same time.
1976  * In this case, the instruction that does not fit in a single
1977  * controller operation must be split into two or more chunks.
1978  *
1979  * Returns true if the instruction must be split, false otherwise.
1980  * The @start_offset parameter is also updated to the offset at which the next
1981  * bundle of instruction must start (if an address or a data instruction).
1982  */
1983 static bool
1984 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1985                                 const struct nand_op_instr *instr,
1986                                 unsigned int *start_offset)
1987 {
1988         switch (pat->type) {
1989         case NAND_OP_ADDR_INSTR:
1990                 if (!pat->ctx.addr.maxcycles)
1991                         break;
1992
1993                 if (instr->ctx.addr.naddrs - *start_offset >
1994                     pat->ctx.addr.maxcycles) {
1995                         *start_offset += pat->ctx.addr.maxcycles;
1996                         return true;
1997                 }
1998                 break;
1999
2000         case NAND_OP_DATA_IN_INSTR:
2001         case NAND_OP_DATA_OUT_INSTR:
2002                 if (!pat->ctx.data.maxlen)
2003                         break;
2004
2005                 if (instr->ctx.data.len - *start_offset >
2006                     pat->ctx.data.maxlen) {
2007                         *start_offset += pat->ctx.data.maxlen;
2008                         return true;
2009                 }
2010                 break;
2011
2012         default:
2013                 break;
2014         }
2015
2016         return false;
2017 }
2018
2019 /**
2020  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2021  *                            remaining in the parser context
2022  * @pat: the pattern to test
2023  * @ctx: the parser context structure to match with the pattern @pat
2024  *
2025  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2026  * Returns true if this is the case, false otherwise. When true is returned,
2027  * @ctx->subop is updated with the set of instructions to be passed to the
2028  * controller driver.
2029  */
2030 static bool
2031 nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2032                          struct nand_op_parser_ctx *ctx)
2033 {
2034         unsigned int instr_offset = ctx->subop.first_instr_start_off;
2035         const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2036         const struct nand_op_instr *instr = ctx->subop.instrs;
2037         unsigned int i, ninstrs;
2038
2039         for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2040                 /*
2041                  * The pattern instruction does not match the operation
2042                  * instruction. If the instruction is marked optional in the
2043                  * pattern definition, we skip the pattern element and continue
2044                  * to the next one. If the element is mandatory, there's no
2045                  * match and we can return false directly.
2046                  */
2047                 if (instr->type != pat->elems[i].type) {
2048                         if (!pat->elems[i].optional)
2049                                 return false;
2050
2051                         continue;
2052                 }
2053
2054                 /*
2055                  * Now check the pattern element constraints. If the pattern is
2056                  * not able to handle the whole instruction in a single step,
2057                  * we have to split it.
2058                  * The last_instr_end_off value comes back updated to point to
2059                  * the position where we have to split the instruction (the
2060                  * start of the next subop chunk).
2061                  */
2062                 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2063                                                     &instr_offset)) {
2064                         ninstrs++;
2065                         i++;
2066                         break;
2067                 }
2068
2069                 instr++;
2070                 ninstrs++;
2071                 instr_offset = 0;
2072         }
2073
2074         /*
2075          * This can happen if all instructions of a pattern are optional.
2076          * Still, if there's not at least one instruction handled by this
2077          * pattern, this is not a match, and we should try the next one (if
2078          * any).
2079          */
2080         if (!ninstrs)
2081                 return false;
2082
2083         /*
2084          * We had a match on the pattern head, but the pattern may be longer
2085          * than the instructions we're asked to execute. We need to make sure
2086  * there are no mandatory elements in the pattern tail.
2087          */
2088         for (; i < pat->nelems; i++) {
2089                 if (!pat->elems[i].optional)
2090                         return false;
2091         }
2092
2093         /*
2094          * We have a match: update the subop structure accordingly and return
2095          * true.
2096          */
2097         ctx->subop.ninstrs = ninstrs;
2098         ctx->subop.last_instr_end_off = instr_offset;
2099
2100         return true;
2101 }
2102
2103 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2104 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2105 {
2106         const struct nand_op_instr *instr;
2107         char *prefix = "      ";
2108         unsigned int i;
2109
2110         pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
2111
2112         for (i = 0; i < ctx->ninstrs; i++) {
2113                 instr = &ctx->instrs[i];
2114
2115                 if (instr == &ctx->subop.instrs[0])
2116                         prefix = "    ->";
2117
2118                 nand_op_trace(prefix, instr);
2119
2120                 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2121                         prefix = "      ";
2122         }
2123 }
2124 #else
2125 static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2126 {
2127         /* NOP */
2128 }
2129 #endif
2130
2131 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2132                                   const struct nand_op_parser_ctx *b)
2133 {
2134         if (a->subop.ninstrs < b->subop.ninstrs)
2135                 return -1;
2136         else if (a->subop.ninstrs > b->subop.ninstrs)
2137                 return 1;
2138
2139         if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2140                 return -1;
2141         else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2142                 return 1;
2143
2144         return 0;
2145 }
2146
2147 /**
2148  * nand_op_parser_exec_op - exec_op parser
2149  * @chip: the NAND chip
2150  * @parser: patterns description provided by the controller driver
2151  * @op: the NAND operation to address
2152  * @check_only: when true, the function only checks if @op can be handled but
2153  *              does not execute the operation
2154  *
2155  * Helper function designed to ease integration of NAND controller drivers that
2156  * only support a limited set of instruction sequences. The supported sequences
2157  * are described in @parser, and the framework takes care of splitting @op into
2158  * multiple sub-operations (if required) and pass them back to the ->exec()
2159  * multiple sub-operations (if required) and passing them back to the ->exec()
2160  *
2161  * NAND controller drivers should call this function from their own ->exec_op()
2162  * implementation.
2163  *
2164  * Returns 0 on success, a negative error code otherwise. A failure can be
2165  * caused by an unsupported operation (none of the supported patterns is able
2166  * to handle the requested operation), or an error returned by the
2167  * matching pattern's ->exec() hook.
2168  */
2169 int nand_op_parser_exec_op(struct nand_chip *chip,
2170                            const struct nand_op_parser *parser,
2171                            const struct nand_operation *op, bool check_only)
2172 {
2173         struct nand_op_parser_ctx ctx = {
2174                 .subop.cs = op->cs,
2175                 .subop.instrs = op->instrs,
2176                 .instrs = op->instrs,
2177                 .ninstrs = op->ninstrs,
2178         };
2179         unsigned int i;
2180
2181         while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2182                 const struct nand_op_parser_pattern *pattern;
2183                 struct nand_op_parser_ctx best_ctx;
2184                 int ret, best_pattern = -1;
2185
2186                 for (i = 0; i < parser->npatterns; i++) {
2187                         struct nand_op_parser_ctx test_ctx = ctx;
2188
2189                         pattern = &parser->patterns[i];
2190                         if (!nand_op_parser_match_pat(pattern, &test_ctx))
2191                                 continue;
2192
2193                         if (best_pattern >= 0 &&
2194                             nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
2195                                 continue;
2196
2197                         best_pattern = i;
2198                         best_ctx = test_ctx;
2199                 }
2200
2201                 if (best_pattern < 0) {
2202                         pr_debug("->exec_op() parser: pattern not found!\n");
2203                         return -ENOTSUPP;
2204                 }
2205
2206                 ctx = best_ctx;
2207                 nand_op_parser_trace(&ctx);
2208
2209                 if (!check_only) {
2210                         pattern = &parser->patterns[best_pattern];
2211                         ret = pattern->exec(chip, &ctx.subop);
2212                         if (ret)
2213                                 return ret;
2214                 }
2215
2216                 /*
2217                  * Update the context structure by pointing to the start of the
2218                  * next subop.
2219                  */
2220                 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2221                 if (ctx.subop.last_instr_end_off)
2222                         ctx.subop.instrs -= 1;
2223
2224                 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2225         }
2226
2227         return 0;
2228 }
2229 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2230
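/*
 * Example (illustrative sketch, all "foo_*" names are hypothetical): a
 * controller driver describes the sequences it supports with the
 * NAND_OP_PARSER()/NAND_OP_PARSER_PATTERN() helpers and forwards every
 * operation to the parser from its ->exec_op() hook. The element limits
 * (5 address cycles, 4096 data bytes) are arbitrary example values.
 *
 *        static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
 *                NAND_OP_PARSER_PATTERN(
 *                        foo_exec_rw,
 *                        NAND_OP_PARSER_PAT_CMD_ELEM(false),
 *                        NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
 *                        NAND_OP_PARSER_PAT_CMD_ELEM(true),
 *                        NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
 *                        NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)),
 *                NAND_OP_PARSER_PATTERN(
 *                        foo_exec_waitrdy,
 *                        NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)));
 *
 *        static int foo_exec_op(struct nand_chip *chip,
 *                               const struct nand_operation *op,
 *                               bool check_only)
 *        {
 *                return nand_op_parser_exec_op(chip, &foo_op_parser, op,
 *                                              check_only);
 *        }
 */
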
2231 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2232 {
2233         return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2234                          instr->type == NAND_OP_DATA_OUT_INSTR);
2235 }
2236
2237 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2238                                       unsigned int instr_idx)
2239 {
2240         return subop && instr_idx < subop->ninstrs;
2241 }
2242
2243 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2244                                              unsigned int instr_idx)
2245 {
2246         if (instr_idx)
2247                 return 0;
2248
2249         return subop->first_instr_start_off;
2250 }
2251
2252 /**
2253  * nand_subop_get_addr_start_off - Get the start offset in an address array
2254  * @subop: The entire sub-operation
2255  * @instr_idx: Index of the instruction inside the sub-operation
2256  *
2257  * During driver development, one could be tempted to directly use the
2258  * ->addr.addrs field of address instructions. This is wrong as address
2259  * instructions might be split.
2260  *
2261  * Given an address instruction, returns the offset of the first cycle to issue.
2262  */
2263 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2264                                            unsigned int instr_idx)
2265 {
2266         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2267                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2268                 return 0;
2269
2270         return nand_subop_get_start_off(subop, instr_idx);
2271 }
2272 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2273
2274 /**
2275  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2276  * @subop: The entire sub-operation
2277  * @instr_idx: Index of the instruction inside the sub-operation
2278  *
2279  * During driver development, one could be tempted to directly use the
2280  * ->addr.naddrs field of an address instruction. This is wrong as instructions
2281  * might be split.
2282  *
2283  * Given an address instruction, returns the number of address cycles to issue.
2284  */
2285 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2286                                          unsigned int instr_idx)
2287 {
2288         int start_off, end_off;
2289
2290         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2291                     subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2292                 return 0;
2293
2294         start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2295
2296         if (instr_idx == subop->ninstrs - 1 &&
2297             subop->last_instr_end_off)
2298                 end_off = subop->last_instr_end_off;
2299         else
2300                 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2301
2302         return end_off - start_off;
2303 }
2304 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2305
2306 /**
2307  * nand_subop_get_data_start_off - Get the start offset in a data array
2308  * @subop: The entire sub-operation
2309  * @instr_idx: Index of the instruction inside the sub-operation
2310  *
2311  * During driver development, one could be tempted to directly use the
2312  * ->data.buf.{in,out} field of data instructions. This is wrong as data
2313  * instructions might be split.
2314  *
2315  * Given a data instruction, returns the offset to start from.
2316  */
2317 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2318                                            unsigned int instr_idx)
2319 {
2320         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2321                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2322                 return 0;
2323
2324         return nand_subop_get_start_off(subop, instr_idx);
2325 }
2326 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2327
2328 /**
2329  * nand_subop_get_data_len - Get the number of bytes to retrieve
2330  * @subop: The entire sub-operation
2331  * @instr_idx: Index of the instruction inside the sub-operation
2332  *
2333  * During driver development, one could be tempted to directly use the
2334  * ->data.len field of a data instruction. This is wrong as data instructions
2335  * might be split.
2336  *
2337  * Returns the length of the chunk of data to send/receive.
2338  */
2339 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2340                                      unsigned int instr_idx)
2341 {
2342         int start_off = 0, end_off;
2343
2344         if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2345                     !nand_instr_is_data(&subop->instrs[instr_idx])))
2346                 return 0;
2347
2348         start_off = nand_subop_get_data_start_off(subop, instr_idx);
2349
2350         if (instr_idx == subop->ninstrs - 1 &&
2351             subop->last_instr_end_off)
2352                 end_off = subop->last_instr_end_off;
2353         else
2354                 end_off = subop->instrs[instr_idx].ctx.data.len;
2355
2356         return end_off - start_off;
2357 }
2358 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2359
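/*
 * Example (illustrative sketch, the "foo_*" helpers are hypothetical): a
 * pattern ->exec() hook should always go through the accessors above rather
 * than dereferencing the instruction fields directly, because the parser may
 * have split an address or data instruction across several sub-operations.
 *
 *        static int foo_exec_rw(struct nand_chip *chip,
 *                               const struct nand_subop *subop)
 *        {
 *                unsigned int i;
 *
 *                for (i = 0; i < subop->ninstrs; i++) {
 *                        const struct nand_op_instr *instr = &subop->instrs[i];
 *
 *                        switch (instr->type) {
 *                        case NAND_OP_ADDR_INSTR:
 *                                foo_send_addrs(instr->ctx.addr.addrs +
 *                                               nand_subop_get_addr_start_off(subop, i),
 *                                               nand_subop_get_num_addr_cyc(subop, i));
 *                                break;
 *                        case NAND_OP_DATA_IN_INSTR:
 *                                foo_read_data((u8 *)instr->ctx.data.buf.in +
 *                                              nand_subop_get_data_start_off(subop, i),
 *                                              nand_subop_get_data_len(subop, i));
 *                                break;
 *                        default:
 *                                break;
 *                        }
 *                }
 *
 *                return 0;
 *        }
 */
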
2360 /**
2361  * nand_reset - Reset and initialize a NAND device
2362  * @chip: The NAND chip
2363  * @chipnr: Internal die id
2364  *
2365  * Save the timings data structure, then apply SDR timings mode 0 (see
2366  * nand_reset_interface for details), do the reset operation, and then
2367  * restore the previous timings.
2368  *
2369  * Returns 0 on success, a negative error code otherwise.
2370  */
2371 int nand_reset(struct nand_chip *chip, int chipnr)
2372 {
2373         int ret;
2374
2375         ret = nand_reset_interface(chip, chipnr);
2376         if (ret)
2377                 return ret;
2378
2379         /*
2380          * The CS line has to be released before we can apply the new NAND
2381          * interface settings, hence this weird nand_select_target()
2382          * nand_deselect_target() dance.
2383          */
2384         nand_select_target(chip, chipnr);
2385         ret = nand_reset_op(chip);
2386         nand_deselect_target(chip);
2387         if (ret)
2388                 return ret;
2389
2390         ret = nand_setup_interface(chip, chipnr);
2391         if (ret)
2392                 return ret;
2393
2394         return 0;
2395 }
2396 EXPORT_SYMBOL_GPL(nand_reset);
2397
2398 /**
2399  * nand_get_features - wrapper to perform a GET_FEATURE
2400  * @chip: NAND chip info structure
2401  * @addr: feature address
2402  * @subfeature_param: the subfeature parameters, a four-byte array
2403  *
2404  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2405  * operation cannot be handled.
2406  */
2407 int nand_get_features(struct nand_chip *chip, int addr,
2408                       u8 *subfeature_param)
2409 {
2410         if (!nand_supports_get_features(chip, addr))
2411                 return -ENOTSUPP;
2412
2413         if (chip->legacy.get_features)
2414                 return chip->legacy.get_features(chip, addr, subfeature_param);
2415
2416         return nand_get_features_op(chip, addr, subfeature_param);
2417 }
2418
2419 /**
2420  * nand_set_features - wrapper to perform a SET_FEATURE
2421  * @chip: NAND chip info structure
2422  * @addr: feature address
2423  * @subfeature_param: the subfeature parameters, a four bytes array
2424  *
2425  * @subfeature_param: the subfeature parameters, a four-byte array
2426  * operation cannot be handled.
2427  */
2428 int nand_set_features(struct nand_chip *chip, int addr,
2429                       u8 *subfeature_param)
2430 {
2431         if (!nand_supports_set_features(chip, addr))
2432                 return -ENOTSUPP;
2433
2434         if (chip->legacy.set_features)
2435                 return chip->legacy.set_features(chip, addr, subfeature_param);
2436
2437         return nand_set_features_op(chip, addr, subfeature_param);
2438 }
2439
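/*
 * Example (illustrative sketch): setting and reading back the ONFI timing
 * mode feature. The feature address comes from the ONFI definitions; the
 * requested mode (3) is an arbitrary example value, and both calls return
 * -ENOTSUPP when the chip does not support the feature.
 *
 *        u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
 *        int ret;
 *
 *        tmode_param[0] = 3;
 *        ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
 *                                tmode_param);
 *        if (!ret)
 *                ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
 *                                        tmode_param);
 */
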
2440 /**
2441  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2442  * @buf: buffer to test
2443  * @len: buffer length
2444  * @bitflips_threshold: maximum number of bitflips
2445  *
2446  * Check if a buffer contains only 0xff, which means the underlying region
2447  * has been erased and is ready to be programmed.
2448  * The bitflips_threshold specify the maximum number of bitflips before
2449  * considering the region is not erased.
2450  * The bitflips_threshold specifies the maximum number of bitflips before
2451  * considering the region as not erased.
2452  * Note: The logic of this function has been extracted from the memweight
2453  * implementation, except that nand_check_erased_buf() exits before
2454  * testing the whole buffer if the number of bitflips exceeds the
2455  * bitflips_threshold value.
2456  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2457  * threshold.
2458  */
2459 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2460 {
2461         const unsigned char *bitmap = buf;
2462         int bitflips = 0;
2463         int weight;
2464
2465         for (; len && ((uintptr_t)bitmap) % sizeof(long);
2466              len--, bitmap++) {
2467                 weight = hweight8(*bitmap);
2468                 bitflips += BITS_PER_BYTE - weight;
2469                 if (unlikely(bitflips > bitflips_threshold))
2470                         return -EBADMSG;
2471         }
2472
2473         for (; len >= sizeof(long);
2474              len -= sizeof(long), bitmap += sizeof(long)) {
2475                 unsigned long d = *((unsigned long *)bitmap);
2476                 if (d == ~0UL)
2477                         continue;
2478                 weight = hweight_long(d);
2479                 bitflips += BITS_PER_LONG - weight;
2480                 if (unlikely(bitflips > bitflips_threshold))
2481                         return -EBADMSG;
2482         }
2483
2484         for (; len > 0; len--, bitmap++) {
2485                 weight = hweight8(*bitmap);
2486                 bitflips += BITS_PER_BYTE - weight;
2487                 if (unlikely(bitflips > bitflips_threshold))
2488                         return -EBADMSG;
2489         }
2490
2491         return bitflips;
2492 }
2493
2494 /**
2495  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2496  *                               0xff data
2497  * @data: data buffer to test
2498  * @datalen: data length
2499  * @ecc: ECC buffer
2500  * @ecclen: ECC length
2501  * @extraoob: extra OOB buffer
2502  * @extraooblen: extra OOB length
2503  * @bitflips_threshold: maximum number of bitflips
2504  *
2505  * Check if a data buffer and its associated ECC and OOB data contain only
2506  * the 0xff pattern, which means the underlying region has been erased and is
2507  * ready to be programmed.
2508  * The bitflips_threshold specifies the maximum number of bitflips before
2509  * considering the region as not erased.
2510  *
2511  * Note:
2512  * 1/ ECC algorithms work on pre-defined block sizes which are usually
2513  *    different from the NAND page size. When fixing bitflips, ECC engines will
2514  *    report the number of errors per chunk, and the NAND core infrastructure
2515  *    expects you to return the maximum number of bitflips for the whole page.
2516  *    This is why you should always use this function on a single chunk and
2517  *    not on the whole page. After checking each chunk you should update your
2518  *    max_bitflips value accordingly.
2519  * 2/ When checking for bitflips in erased pages you should not only check
2520  *    the payload data but also their associated ECC data, because a user might
2521  *    have programmed a page where almost all bits are 1 but a few are not. In
2522  *    this case, we shouldn't consider the chunk as erased, and checking the
2523  *    ECC bytes catches this case.
2524  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2525  *    data are protected by the ECC engine.
2526  *    It could also be used if you support subpages and want to attach some
2527  *    extra OOB data to an ECC chunk.
2528  *
2529  * Returns a positive number of bitflips less than or equal to
2530  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2531  * threshold. In case of success, the passed buffers are filled with 0xff.
2532  */
2533 int nand_check_erased_ecc_chunk(void *data, int datalen,
2534                                 void *ecc, int ecclen,
2535                                 void *extraoob, int extraooblen,
2536                                 int bitflips_threshold)
2537 {
2538         int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2539
2540         data_bitflips = nand_check_erased_buf(data, datalen,
2541                                               bitflips_threshold);
2542         if (data_bitflips < 0)
2543                 return data_bitflips;
2544
2545         bitflips_threshold -= data_bitflips;
2546
2547         ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2548         if (ecc_bitflips < 0)
2549                 return ecc_bitflips;
2550
2551         bitflips_threshold -= ecc_bitflips;
2552
2553         extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2554                                                   bitflips_threshold);
2555         if (extraoob_bitflips < 0)
2556                 return extraoob_bitflips;
2557
2558         if (data_bitflips)
2559                 memset(data, 0xff, datalen);
2560
2561         if (ecc_bitflips)
2562                 memset(ecc, 0xff, ecclen);
2563
2564         if (extraoob_bitflips)
2565                 memset(extraoob, 0xff, extraooblen);
2566
2567         return data_bitflips + ecc_bitflips + extraoob_bitflips;
2568 }
2569 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2570
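/*
 * Example (illustrative sketch): the per-chunk pattern described in the notes
 * above, matching how the ECC-based page read helpers later in this file use
 * it. 'stat' holds the return value of chip->ecc.correct() for the current
 * chunk, 'p' points to the chunk data and 'i' indexes its ECC bytes.
 *
 *        if (stat == -EBADMSG &&
 *            (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
 *                stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
 *                                                   &ecc_code[i], chip->ecc.bytes,
 *                                                   NULL, 0, chip->ecc.strength);
 *        }
 *
 *        if (stat < 0) {
 *                mtd->ecc_stats.failed++;
 *        } else {
 *                mtd->ecc_stats.corrected += stat;
 *                max_bitflips = max_t(unsigned int, max_bitflips, stat);
 *        }
 */
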
2571 /**
2572  * nand_read_page_raw_notsupp - dummy read raw page function
2573  * @chip: nand chip info structure
2574  * @buf: buffer to store read data
2575  * @oob_required: caller requires OOB data read to chip->oob_poi
2576  * @page: page number to read
2577  *
2578  * Returns -ENOTSUPP unconditionally.
2579  */
2580 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2581                                int oob_required, int page)
2582 {
2583         return -ENOTSUPP;
2584 }
2585
2586 /**
2587  * nand_read_page_raw - [INTERN] read raw page data without ecc
2588  * @chip: nand chip info structure
2589  * @buf: buffer to store read data
2590  * @oob_required: caller requires OOB data read to chip->oob_poi
2591  * @page: page number to read
2592  *
2593  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2594  */
2595 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2596                        int page)
2597 {
2598         struct mtd_info *mtd = nand_to_mtd(chip);
2599         int ret;
2600
2601         ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2602         if (ret)
2603                 return ret;
2604
2605         if (oob_required) {
2606                 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2607                                         false, false);
2608                 if (ret)
2609                         return ret;
2610         }
2611
2612         return 0;
2613 }
2614 EXPORT_SYMBOL(nand_read_page_raw);
2615
2616 /**
2617  * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2618  * @chip: NAND chip info structure
2619  * @buf: buffer to store read data
2620  * @oob_required: caller requires OOB data read to chip->oob_poi
2621  * @page: page number to read
2622  *
2623  * This is a raw page read, i.e. without any error detection/correction.
2624  * Monolithic means we are requesting all the relevant data (main data plus,
2625  * optionally, the OOB data) to be loaded in the NAND cache and sent over the
2626  * bus (from the NAND chip to the NAND controller) in a single
2627  * operation. This is an alternative to nand_read_page_raw(), which
2628  * first reads the main data, and if the OOB data is requested too,
2629  * then reads more data on the bus.
2630  */
2631 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
2632                                   int oob_required, int page)
2633 {
2634         struct mtd_info *mtd = nand_to_mtd(chip);
2635         unsigned int size = mtd->writesize;
2636         u8 *read_buf = buf;
2637         int ret;
2638
2639         if (oob_required) {
2640                 size += mtd->oobsize;
2641
2642                 if (buf != chip->data_buf)
2643                         read_buf = nand_get_data_buf(chip);
2644         }
2645
2646         ret = nand_read_page_op(chip, page, 0, read_buf, size);
2647         if (ret)
2648                 return ret;
2649
2650         if (buf != chip->data_buf)
2651                 memcpy(buf, read_buf, mtd->writesize);
2652
2653         return 0;
2654 }
2655 EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2656
2657 /**
2658  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2659  * @chip: nand chip info structure
2660  * @buf: buffer to store read data
2661  * @oob_required: caller requires OOB data read to chip->oob_poi
2662  * @page: page number to read
2663  *
2664  * We need a special oob layout and handling even when OOB isn't used.
2665  */
2666 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2667                                        int oob_required, int page)
2668 {
2669         struct mtd_info *mtd = nand_to_mtd(chip);
2670         int eccsize = chip->ecc.size;
2671         int eccbytes = chip->ecc.bytes;
2672         uint8_t *oob = chip->oob_poi;
2673         int steps, size, ret;
2674
2675         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2676         if (ret)
2677                 return ret;
2678
2679         for (steps = chip->ecc.steps; steps > 0; steps--) {
2680                 ret = nand_read_data_op(chip, buf, eccsize, false, false);
2681                 if (ret)
2682                         return ret;
2683
2684                 buf += eccsize;
2685
2686                 if (chip->ecc.prepad) {
2687                         ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2688                                                 false, false);
2689                         if (ret)
2690                                 return ret;
2691
2692                         oob += chip->ecc.prepad;
2693                 }
2694
2695                 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
2696                 if (ret)
2697                         return ret;
2698
2699                 oob += eccbytes;
2700
2701                 if (chip->ecc.postpad) {
2702                         ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2703                                                 false, false);
2704                         if (ret)
2705                                 return ret;
2706
2707                         oob += chip->ecc.postpad;
2708                 }
2709         }
2710
2711         size = mtd->oobsize - (oob - chip->oob_poi);
2712         if (size) {
2713                 ret = nand_read_data_op(chip, oob, size, false, false);
2714                 if (ret)
2715                         return ret;
2716         }
2717
2718         return 0;
2719 }
2720
2721 /**
2722  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2723  * @chip: nand chip info structure
2724  * @buf: buffer to store read data
2725  * @oob_required: caller requires OOB data read to chip->oob_poi
2726  * @page: page number to read
2727  */
2728 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2729                                 int oob_required, int page)
2730 {
2731         struct mtd_info *mtd = nand_to_mtd(chip);
2732         int i, eccsize = chip->ecc.size, ret;
2733         int eccbytes = chip->ecc.bytes;
2734         int eccsteps = chip->ecc.steps;
2735         uint8_t *p = buf;
2736         uint8_t *ecc_calc = chip->ecc.calc_buf;
2737         uint8_t *ecc_code = chip->ecc.code_buf;
2738         unsigned int max_bitflips = 0;
2739
2740         chip->ecc.read_page_raw(chip, buf, 1, page);
2741
2742         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2743                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2744
2745         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2746                                          chip->ecc.total);
2747         if (ret)
2748                 return ret;
2749
2750         eccsteps = chip->ecc.steps;
2751         p = buf;
2752
2753         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2754                 int stat;
2755
2756                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2757                 if (stat < 0) {
2758                         mtd->ecc_stats.failed++;
2759                 } else {
2760                         mtd->ecc_stats.corrected += stat;
2761                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2762                 }
2763         }
2764         return max_bitflips;
2765 }
2766
2767 /**
2768  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2769  * @chip: nand chip info structure
2770  * @data_offs: offset of requested data within the page
2771  * @readlen: data length
2772  * @bufpoi: buffer to store read data
2773  * @page: page number to read
2774  */
2775 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2776                              uint32_t readlen, uint8_t *bufpoi, int page)
2777 {
2778         struct mtd_info *mtd = nand_to_mtd(chip);
2779         int start_step, end_step, num_steps, ret;
2780         uint8_t *p;
2781         int data_col_addr, i, gaps = 0;
2782         int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2783         int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2784         int index, section = 0;
2785         unsigned int max_bitflips = 0;
2786         struct mtd_oob_region oobregion = { };
2787
2788         /* Column address within the page aligned to ECC size (256 bytes) */
2789         start_step = data_offs / chip->ecc.size;
2790         end_step = (data_offs + readlen - 1) / chip->ecc.size;
2791         num_steps = end_step - start_step + 1;
2792         index = start_step * chip->ecc.bytes;
2793
2794         /* Data size aligned to the ECC size */
2795         datafrag_len = num_steps * chip->ecc.size;
2796         eccfrag_len = num_steps * chip->ecc.bytes;
2797
2798         data_col_addr = start_step * chip->ecc.size;
2799         /* The requested data may not be page aligned */
2800         p = bufpoi + data_col_addr;
2801         ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2802         if (ret)
2803                 return ret;
2804
2805         /* Calculate ECC */
2806         for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2807                 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2808
2809         /*
2810          * Performance is better if we position offsets according to
2811          * ecc.pos. Let's make sure that there are no gaps in ECC positions.
2812          */
2813         ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
2814         if (ret)
2815                 return ret;
2816
2817         if (oobregion.length < eccfrag_len)
2818                 gaps = 1;
2819
2820         if (gaps) {
2821                 ret = nand_change_read_column_op(chip, mtd->writesize,
2822                                                  chip->oob_poi, mtd->oobsize,
2823                                                  false);
2824                 if (ret)
2825                         return ret;
2826         } else {
2827                 /*
2828                  * Send the command to read the particular ECC bytes, taking
2829                  * care of buswidth alignment in read_buf.
2830                  */
2831                 aligned_pos = oobregion.offset & ~(busw - 1);
2832                 aligned_len = eccfrag_len;
2833                 if (oobregion.offset & (busw - 1))
2834                         aligned_len++;
2835                 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2836                     (busw - 1))
2837                         aligned_len++;
2838
2839                 ret = nand_change_read_column_op(chip,
2840                                                  mtd->writesize + aligned_pos,
2841                                                  &chip->oob_poi[aligned_pos],
2842                                                  aligned_len, false);
2843                 if (ret)
2844                         return ret;
2845         }
2846
2847         ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
2848                                          chip->oob_poi, index, eccfrag_len);
2849         if (ret)
2850                 return ret;
2851
2852         p = bufpoi + data_col_addr;
2853         for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2854                 int stat;
2855
2856                 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
2857                                          &chip->ecc.calc_buf[i]);
2858                 if (stat == -EBADMSG &&
2859                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2860                         /* check for empty pages with bitflips */
2861                         stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2862                                                 &chip->ecc.code_buf[i],
2863                                                 chip->ecc.bytes,
2864                                                 NULL, 0,
2865                                                 chip->ecc.strength);
2866                 }
2867
2868                 if (stat < 0) {
2869                         mtd->ecc_stats.failed++;
2870                 } else {
2871                         mtd->ecc_stats.corrected += stat;
2872                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2873                 }
2874         }
2875         return max_bitflips;
2876 }
2877
2878 /**
2879  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2880  * @chip: nand chip info structure
2881  * @buf: buffer to store read data
2882  * @oob_required: caller requires OOB data read to chip->oob_poi
2883  * @page: page number to read
2884  *
2885  * Not for syndrome calculating ECC controllers which need a special oob layout.
2886  */
2887 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
2888                                 int oob_required, int page)
2889 {
2890         struct mtd_info *mtd = nand_to_mtd(chip);
2891         int i, eccsize = chip->ecc.size, ret;
2892         int eccbytes = chip->ecc.bytes;
2893         int eccsteps = chip->ecc.steps;
2894         uint8_t *p = buf;
2895         uint8_t *ecc_calc = chip->ecc.calc_buf;
2896         uint8_t *ecc_code = chip->ecc.code_buf;
2897         unsigned int max_bitflips = 0;
2898
2899         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2900         if (ret)
2901                 return ret;
2902
2903         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2904                 chip->ecc.hwctl(chip, NAND_ECC_READ);
2905
2906                 ret = nand_read_data_op(chip, p, eccsize, false, false);
2907                 if (ret)
2908                         return ret;
2909
2910                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2911         }
2912
2913         ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
2914                                 false);
2915         if (ret)
2916                 return ret;
2917
2918         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2919                                          chip->ecc.total);
2920         if (ret)
2921                 return ret;
2922
2923         eccsteps = chip->ecc.steps;
2924         p = buf;
2925
2926         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2927                 int stat;
2928
2929                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2930                 if (stat == -EBADMSG &&
2931                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2932                         /* check for empty pages with bitflips */
2933                         stat = nand_check_erased_ecc_chunk(p, eccsize,
2934                                                 &ecc_code[i], eccbytes,
2935                                                 NULL, 0,
2936                                                 chip->ecc.strength);
2937                 }
2938
2939                 if (stat < 0) {
2940                         mtd->ecc_stats.failed++;
2941                 } else {
2942                         mtd->ecc_stats.corrected += stat;
2943                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2944                 }
2945         }
2946         return max_bitflips;
2947 }
2948
2949 /**
2950  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2951  * @chip: nand chip info structure
2952  * @buf: buffer to store read data
2953  * @oob_required: caller requires OOB data read to chip->oob_poi
2954  * @page: page number to read
2955  *
2956  * The hw generator calculates the error syndrome automatically. Therefore we
2957  * need a special oob layout and handling.
2958  */
2959 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
2960                                    int oob_required, int page)
2961 {
2962         struct mtd_info *mtd = nand_to_mtd(chip);
2963         int ret, i, eccsize = chip->ecc.size;
2964         int eccbytes = chip->ecc.bytes;
2965         int eccsteps = chip->ecc.steps;
2966         int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
2967         uint8_t *p = buf;
2968         uint8_t *oob = chip->oob_poi;
2969         unsigned int max_bitflips = 0;
2970
2971         ret = nand_read_page_op(chip, page, 0, NULL, 0);
2972         if (ret)
2973                 return ret;
2974
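             /*
              * The on-flash layout is interleaved per ECC step:
              * data | prepad | ECC (syndrome) | postpad, so data, OOB bytes
              * and correction are all handled step by step in a single pass.
              */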
2975         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2976                 int stat;
2977
2978                 chip->ecc.hwctl(chip, NAND_ECC_READ);
2979
2980                 ret = nand_read_data_op(chip, p, eccsize, false, false);
2981                 if (ret)
2982                         return ret;
2983
2984                 if (chip->ecc.prepad) {
2985                         ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2986                                                 false, false);
2987                         if (ret)
2988                                 return ret;
2989
2990                         oob += chip->ecc.prepad;
2991                 }
2992
2993                 chip->ecc.hwctl(chip, NAND_ECC_READSYN);
2994
2995                 ret = nand_read_data_op(chip, oob, eccbytes, false, false);
2996                 if (ret)
2997                         return ret;
2998
2999                 stat = chip->ecc.correct(chip, p, oob, NULL);
3000
3001                 oob += eccbytes;
3002
3003                 if (chip->ecc.postpad) {
3004                         ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3005                                                 false, false);
3006                         if (ret)
3007                                 return ret;
3008
3009                         oob += chip->ecc.postpad;
3010                 }
3011
3012                 if (stat == -EBADMSG &&
3013                     (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3014                         /* check for empty pages with bitflips */
3015                         stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3016                                                            oob - eccpadbytes,
3017                                                            eccpadbytes,
3018                                                            NULL, 0,
3019                                                            chip->ecc.strength);
3020                 }
3021
3022                 if (stat < 0) {
3023                         mtd->ecc_stats.failed++;
3024                 } else {
3025                         mtd->ecc_stats.corrected += stat;
3026                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
3027                 }
3028         }
3029
3030         /* Calculate remaining oob bytes */
3031         i = mtd->oobsize - (oob - chip->oob_poi);
3032         if (i) {
3033                 ret = nand_read_data_op(chip, oob, i, false, false);
3034                 if (ret)
3035                         return ret;
3036         }
3037
3038         return max_bitflips;
3039 }
3040
3041 /**
3042  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3043  * @chip: NAND chip object
3044  * @oob: oob destination address
3045  * @ops: oob ops structure
3046  * @len: size of oob to transfer
3047  */
3048 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3049                                   struct mtd_oob_ops *ops, size_t len)
3050 {
3051         struct mtd_info *mtd = nand_to_mtd(chip);
3052         int ret;
3053
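             /*
              * MTD_OPS_PLACE_OOB and MTD_OPS_RAW copy straight from
              * chip->oob_poi at the requested offset; MTD_OPS_AUTO_OOB
              * extracts only the free (non-ECC) bytes as described by the
              * OOB layout.
              */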
3054         switch (ops->mode) {
3055
3056         case MTD_OPS_PLACE_OOB:
3057         case MTD_OPS_RAW:
3058                 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3059                 return oob + len;
3060
3061         case MTD_OPS_AUTO_OOB:
3062                 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3063                                                   ops->ooboffs, len);
3064                 BUG_ON(ret);
3065                 return oob + len;
3066
3067         default:
3068                 BUG();
3069         }
3070         return NULL;
3071 }
3072
3073 /**
3074  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3075  * @chip: NAND chip object
3076  * @retry_mode: the retry mode to use
3077  *
3078  * Some vendors supply a special command to shift the Vt threshold, to be used
3079  * when there are too many bitflips in a page (i.e., ECC error). After setting
3080  * a new threshold, the host should retry reading the page.
3081  */
3082 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3083 {
3084         pr_debug("setting READ RETRY mode %d\n", retry_mode);
3085
3086         if (retry_mode >= chip->read_retries)
3087                 return -EINVAL;
3088
3089         if (!chip->ops.setup_read_retry)
3090                 return -EOPNOTSUPP;
3091
3092         return chip->ops.setup_read_retry(chip, retry_mode);
3093 }
3094
3095 static void nand_wait_readrdy(struct nand_chip *chip)
3096 {
3097         const struct nand_sdr_timings *sdr;
3098
3099         if (!(chip->options & NAND_NEED_READRDY))
3100                 return;
3101
3102         sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
3103         WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3104 }
3105
3106 /**
3107  * nand_do_read_ops - [INTERN] Read data with ECC
3108  * @chip: NAND chip object
3109  * @from: offset to read from
3110  * @ops: oob ops structure
3111  *
3112  * Internal function. Called with chip held.
3113  */
3114 static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3115                             struct mtd_oob_ops *ops)
3116 {
3117         int chipnr, page, realpage, col, bytes, aligned, oob_required;
3118         struct mtd_info *mtd = nand_to_mtd(chip);
3119         int ret = 0;
3120         uint32_t readlen = ops->len;
3121         uint32_t oobreadlen = ops->ooblen;
3122         uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3123
3124         uint8_t *bufpoi, *oob, *buf;
3125         int use_bounce_buf;
3126         unsigned int max_bitflips = 0;
3127         int retry_mode = 0;
3128         bool ecc_fail = false;
3129
3130         chipnr = (int)(from >> chip->chip_shift);
3131         nand_select_target(chip, chipnr);
3132
3133         realpage = (int)(from >> chip->page_shift);
3134         page = realpage & chip->pagemask;
3135
3136         col = (int)(from & (mtd->writesize - 1));
3137
3138         buf = ops->datbuf;
3139         oob = ops->oobbuf;
3140         oob_required = oob ? 1 : 0;
3141
3142         while (1) {
3143                 struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
3144
3145                 bytes = min(mtd->writesize - col, readlen);
3146                 aligned = (bytes == mtd->writesize);
3147
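                     /*
                      * Partial-page reads always go through the driver's
                      * bounce buffer; DMA-driven controllers also fall back
                      * to it when the caller's buffer cannot be used for DMA
                      * directly (fails virt_addr_valid() or is not aligned
                      * to chip->buf_align).
                      */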
3148                 if (!aligned)
3149                         use_bounce_buf = 1;
3150                 else if (chip->options & NAND_USES_DMA)
3151                         use_bounce_buf = !virt_addr_valid(buf) ||
3152                                          !IS_ALIGNED((unsigned long)buf,
3153                                                      chip->buf_align);
3154                 else
3155                         use_bounce_buf = 0;
3156
3157                 /* Is the current page in the buffer? */
3158                 if (realpage != chip->pagecache.page || oob) {
3159                         bufpoi = use_bounce_buf ? chip->data_buf : buf;
3160
3161                         if (use_bounce_buf && aligned)
3162                                 pr_debug("%s: using read bounce buffer for buf@%p\n",
3163                                                  __func__, buf);
3164
3165 read_retry:
3166                         /*
3167                          * Now read the page into the buffer.  Absent an error,
3168                          * the read methods return max bitflips per ecc step.
3169                          */
3170                         if (unlikely(ops->mode == MTD_OPS_RAW))
3171                                 ret = chip->ecc.read_page_raw(chip, bufpoi,
3172                                                               oob_required,
3173                                                               page);
3174                         else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3175                                  !oob)
3176                                 ret = chip->ecc.read_subpage(chip, col, bytes,
3177                                                              bufpoi, page);
3178                         else
3179                                 ret = chip->ecc.read_page(chip, bufpoi,
3180                                                           oob_required, page);
3181                         if (ret < 0) {
3182                                 if (use_bounce_buf)
3183                                         /* Invalidate page cache */
3184                                         chip->pagecache.page = -1;
3185                                 break;
3186                         }
3187
3188                         /*
3189                          * Copy back the data in the initial buffer when reading
3190                          * partial pages or when a bounce buffer is required.
3191                          */
3192                         if (use_bounce_buf) {
3193                                 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3194                                     !(mtd->ecc_stats.failed - ecc_stats.failed) &&
3195                                     (ops->mode != MTD_OPS_RAW)) {
3196                                         chip->pagecache.page = realpage;
3197                                         chip->pagecache.bitflips = ret;
3198                                 } else {
3199                                         /* Invalidate page cache */
3200                                         chip->pagecache.page = -1;
3201                                 }
3202                                 memcpy(buf, bufpoi + col, bytes);
3203                         }
3204
3205                         if (unlikely(oob)) {
3206                                 int toread = min(oobreadlen, max_oobsize);
3207
3208                                 if (toread) {
3209                                         oob = nand_transfer_oob(chip, oob, ops,
3210                                                                 toread);
3211                                         oobreadlen -= toread;
3212                                 }
3213                         }
3214
3215                         nand_wait_readrdy(chip);
3216
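                             /*
                              * If this read added uncorrectable errors, step
                              * through the vendor-specific read-retry modes
                              * (which typically shift the read voltage
                              * thresholds) and re-read the page; only flag a
                              * real failure once every mode has been tried.
                              */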
3217                         if (mtd->ecc_stats.failed - ecc_stats.failed) {
3218                                 if (retry_mode + 1 < chip->read_retries) {
3219                                         retry_mode++;
3220                                         ret = nand_setup_read_retry(chip,
3221                                                         retry_mode);
3222                                         if (ret < 0)
3223                                                 break;
3224
3225                                         /* Reset ecc_stats; retry */
3226                                         mtd->ecc_stats = ecc_stats;
3227                                         goto read_retry;
3228                                 } else {
3229                                         /* No more retry modes; real failure */
3230                                         ecc_fail = true;
3231                                 }
3232                         }
3233
3234                         buf += bytes;
3235                         max_bitflips = max_t(unsigned int, max_bitflips, ret);
3236                 } else {
3237                         memcpy(buf, chip->data_buf + col, bytes);
3238                         buf += bytes;
3239                         max_bitflips = max_t(unsigned int, max_bitflips,
3240                                              chip->pagecache.bitflips);
3241                 }
3242
3243                 readlen -= bytes;
3244
3245                 /* Reset to retry mode 0 */
3246                 if (retry_mode) {
3247                         ret = nand_setup_read_retry(chip, 0);
3248                         if (ret < 0)
3249                                 break;
3250                         retry_mode = 0;
3251                 }
3252
3253                 if (!readlen)
3254                         break;
3255
3256                 /* For subsequent reads align to page boundary */
3257                 col = 0;
3258                 /* Increment page address */
3259                 realpage++;
3260
3261                 page = realpage & chip->pagemask;
3262                 /* Check if we cross a chip boundary */
3263                 if (!page) {
3264                         chipnr++;
3265                         nand_deselect_target(chip);
3266                         nand_select_target(chip, chipnr);
3267                 }
3268         }
3269         nand_deselect_target(chip);
3270
3271         ops->retlen = ops->len - (size_t) readlen;
3272         if (oob)
3273                 ops->oobretlen = ops->ooblen - oobreadlen;
3274
3275         if (ret < 0)
3276                 return ret;
3277
3278         if (ecc_fail)
3279                 return -EBADMSG;
3280
3281         return max_bitflips;
3282 }
3283
3284 /**
3285  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3286  * @chip: nand chip info structure
3287  * @page: page number to read
3288  */
3289 int nand_read_oob_std(struct nand_chip *chip, int page)
3290 {
3291         struct mtd_info *mtd = nand_to_mtd(chip);
3292
3293         return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3294 }
3295 EXPORT_SYMBOL(nand_read_oob_std);
3296
3297 /**
3298  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3299  *                          with syndromes
3300  * @chip: nand chip info structure
3301  * @page: page number to read
3302  */
3303 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3304 {
3305         struct mtd_info *mtd = nand_to_mtd(chip);
3306         int length = mtd->oobsize;
3307         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3308         int eccsize = chip->ecc.size;
3309         uint8_t *bufpoi = chip->oob_poi;
3310         int i, toread, sndrnd = 0, pos, ret;
3311
3312         ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3313         if (ret)
3314                 return ret;
3315
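             /*
              * The OOB chunk (prepad + ECC + postpad) of each step sits right
              * after that step's data, so seek past the data area before
              * every chunk: large-page chips use a change read column
              * operation, small-page chips re-issue the read with a new
              * column address.
              */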
3316         for (i = 0; i < chip->ecc.steps; i++) {
3317                 if (sndrnd) {
3318                         int ret;
3319
3320                         pos = eccsize + i * (eccsize + chunk);
3321                         if (mtd->writesize > 512)
3322                                 ret = nand_change_read_column_op(chip, pos,
3323                                                                  NULL, 0,
3324                                                                  false);
3325                         else
3326                                 ret = nand_read_page_op(chip, page, pos, NULL,
3327                                                         0);
3328
3329                         if (ret)
3330                                 return ret;
3331                 } else
3332                         sndrnd = 1;
3333                 toread = min_t(int, length, chunk);
3334
3335                 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3336                 if (ret)
3337                         return ret;
3338
3339                 bufpoi += toread;
3340                 length -= toread;
3341         }
3342         if (length > 0) {
3343                 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3344                 if (ret)
3345                         return ret;
3346         }
3347
3348         return 0;
3349 }
3350
3351 /**
3352  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3353  * @chip: nand chip info structure
3354  * @page: page number to write
3355  */
3356 int nand_write_oob_std(struct nand_chip *chip, int page)
3357 {
3358         struct mtd_info *mtd = nand_to_mtd(chip);
3359
3360         return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3361                                  mtd->oobsize);
3362 }
3363 EXPORT_SYMBOL(nand_write_oob_std);
3364
3365 /**
3366  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3367  *                           with syndrome - only for large page flash
3368  * @chip: nand chip info structure
3369  * @page: page number to write
3370  */
3371 static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3372 {
3373         struct mtd_info *mtd = nand_to_mtd(chip);
3374         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3375         int eccsize = chip->ecc.size, length = mtd->oobsize;
3376         int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3377         const uint8_t *bufpoi = chip->oob_poi;
3378
3379         /*
3380          * data-ecc-data-ecc ... ecc-oob
3381          * or
3382          * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3383          */
3384         if (!chip->ecc.prepad && !chip->ecc.postpad) {
3385                 pos = steps * (eccsize + chunk);
3386                 steps = 0;
3387         } else
3388                 pos = eccsize;
3389
3390         ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3391         if (ret)
3392                 return ret;
3393
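             /*
              * Skip over each step's data area before writing its OOB chunk:
              * large-page chips use a change write column operation,
              * small-page chips have no such command and pad the data area
              * with 0xFF instead.
              */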
3394         for (i = 0; i < steps; i++) {
3395                 if (sndcmd) {
3396                         if (mtd->writesize <= 512) {
3397                                 uint32_t fill = 0xFFFFFFFF;
3398
3399                                 len = eccsize;
3400                                 while (len > 0) {
3401                                         int num = min_t(int, len, 4);
3402
3403                                         ret = nand_write_data_op(chip, &fill,
3404                                                                  num, false);
3405                                         if (ret)
3406                                                 return ret;
3407
3408                                         len -= num;
3409                                 }
3410                         } else {
3411                                 pos = eccsize + i * (eccsize + chunk);
3412                                 ret = nand_change_write_column_op(chip, pos,
3413                                                                   NULL, 0,
3414                                                                   false);
3415                                 if (ret)
3416                                         return ret;
3417                         }
3418                 } else
3419                         sndcmd = 1;
3420                 len = min_t(int, length, chunk);
3421
3422                 ret = nand_write_data_op(chip, bufpoi, len, false);
3423                 if (ret)
3424                         return ret;
3425
3426                 bufpoi += len;
3427                 length -= len;
3428         }
3429         if (length > 0) {
3430                 ret = nand_write_data_op(chip, bufpoi, length, false);
3431                 if (ret)
3432                         return ret;
3433         }
3434
3435         return nand_prog_page_end_op(chip);
3436 }
3437
3438 /**
3439  * nand_do_read_oob - [INTERN] NAND read out-of-band
3440  * @chip: NAND chip object
3441  * @from: offset to read from
3442  * @ops: oob operations description structure
3443  *
3444  * NAND read out-of-band data from the spare area.
3445  */
3446 static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3447                             struct mtd_oob_ops *ops)
3448 {
3449         struct mtd_info *mtd = nand_to_mtd(chip);
3450         unsigned int max_bitflips = 0;
3451         int page, realpage, chipnr;
3452         struct mtd_ecc_stats stats;
3453         int readlen = ops->ooblen;
3454         int len;
3455         uint8_t *buf = ops->oobbuf;
3456         int ret = 0;
3457
3458         pr_debug("%s: from = 0x%08Lx, len = %i\n",
3459                         __func__, (unsigned long long)from, readlen);
3460
3461         stats = mtd->ecc_stats;
3462
3463         len = mtd_oobavail(mtd, ops);
3464
3465         chipnr = (int)(from >> chip->chip_shift);
3466         nand_select_target(chip, chipnr);
3467
3468         /* Shift to get page */
3469         realpage = (int)(from >> chip->page_shift);
3470         page = realpage & chip->pagemask;
3471
3472         while (1) {
3473                 if (ops->mode == MTD_OPS_RAW)
3474                         ret = chip->ecc.read_oob_raw(chip, page);
3475                 else
3476                         ret = chip->ecc.read_oob(chip, page);
3477
3478                 if (ret < 0)
3479                         break;
3480
3481                 len = min(len, readlen);
3482                 buf = nand_transfer_oob(chip, buf, ops, len);
3483
3484                 nand_wait_readrdy(chip);
3485
3486                 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3487
3488                 readlen -= len;
3489                 if (!readlen)
3490                         break;
3491
3492                 /* Increment page address */
3493                 realpage++;
3494
3495                 page = realpage & chip->pagemask;
3496                 /* Check if we cross a chip boundary */
3497                 if (!page) {
3498                         chipnr++;
3499                         nand_deselect_target(chip);
3500                         nand_select_target(chip, chipnr);
3501                 }
3502         }
3503         nand_deselect_target(chip);
3504
3505         ops->oobretlen = ops->ooblen - readlen;
3506
3507         if (ret < 0)
3508                 return ret;
3509
3510         if (mtd->ecc_stats.failed - stats.failed)
3511                 return -EBADMSG;
3512
3513         return max_bitflips;
3514 }
3515
3516 /**
3517  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3518  * @mtd: MTD device structure
3519  * @from: offset to read from
3520  * @ops: oob operation description structure
3521  *
3522  * NAND read data and/or out-of-band data.
3523  */
3524 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3525                          struct mtd_oob_ops *ops)
3526 {
3527         struct nand_chip *chip = mtd_to_nand(mtd);
3528         int ret;
3529
3530         ops->retlen = 0;
3531
3532         if (ops->mode != MTD_OPS_PLACE_OOB &&
3533             ops->mode != MTD_OPS_AUTO_OOB &&
3534             ops->mode != MTD_OPS_RAW)
3535                 return -ENOTSUPP;
3536
3537         ret = nand_get_device(chip);
3538         if (ret)
3539                 return ret;
3540
3541         if (!ops->datbuf)
3542                 ret = nand_do_read_oob(chip, from, ops);
3543         else
3544                 ret = nand_do_read_ops(chip, from, ops);
3545
3546         nand_release_device(chip);
3547         return ret;
3548 }
3549
3550 /**
3551  * nand_write_page_raw_notsupp - dummy raw page write function
3552  * @chip: nand chip info structure
3553  * @buf: data buffer
3554  * @oob_required: must write chip->oob_poi to OOB
3555  * @page: page number to write
3556  *
3557  * Returns -ENOTSUPP unconditionally.
3558  */
3559 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3560                                 int oob_required, int page)
3561 {
3562         return -ENOTSUPP;
3563 }
3564
3565 /**
3566  * nand_write_page_raw - [INTERN] raw page write function
3567  * @chip: nand chip info structure
3568  * @buf: data buffer
3569  * @oob_required: must write chip->oob_poi to OOB
3570  * @page: page number to write
3571  *
3572  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3573  */
3574 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3575                         int oob_required, int page)
3576 {
3577         struct mtd_info *mtd = nand_to_mtd(chip);
3578         int ret;
3579
3580         ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3581         if (ret)
3582                 return ret;
3583
3584         if (oob_required) {
3585                 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3586                                          false);
3587                 if (ret)
3588                         return ret;
3589         }
3590
3591         return nand_prog_page_end_op(chip);
3592 }
3593 EXPORT_SYMBOL(nand_write_page_raw);
3594
3595 /**
3596  * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3597  * @chip: NAND chip info structure
3598  * @buf: data buffer to write
3599  * @oob_required: must write chip->oob_poi to OOB
3600  * @page: page number to write
3601  *
3602  * This is a raw page write, i.e. without any error detection/correction.
3603  * Monolithic means we are requesting all the relevant data (main plus
3604  * optionally OOB) to be sent over the bus and effectively programmed
3605  * into the NAND chip arrays in a single operation. This is an
3606  * alternative to nand_write_page_raw(), which first sends the main
3607  * data, then optionally sends the OOB data by latching more data
3608  * cycles on the NAND bus, and finally sends the program command to
3609  * synchronize the NAND chip cache.
3610  */
3611 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3612                                    int oob_required, int page)
3613 {
3614         struct mtd_info *mtd = nand_to_mtd(chip);
3615         unsigned int size = mtd->writesize;
3616         u8 *write_buf = (u8 *)buf;
3617
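             /*
              * chip->oob_poi is normally set up to point just past the main
              * data area of the internal chip->data_buf, so bouncing the
              * caller's data into that buffer makes data + OOB one contiguous
              * chunk for a single program operation.
              */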
3618         if (oob_required) {
3619                 size += mtd->oobsize;
3620
3621                 if (buf != chip->data_buf) {
3622                         write_buf = nand_get_data_buf(chip);
3623                         memcpy(write_buf, buf, mtd->writesize);
3624                 }
3625         }
3626
3627         return nand_prog_page_op(chip, page, 0, write_buf, size);
3628 }
3629 EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3630
3631 /**
3632  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3633  * @chip: nand chip info structure
3634  * @buf: data buffer
3635  * @oob_required: must write chip->oob_poi to OOB
3636  * @page: page number to write
3637  *
3638  * We need a special oob layout and handling even when ECC isn't checked.
3639  */
3640 static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3641                                         const uint8_t *buf, int oob_required,
3642                                         int page)
3643 {
3644         struct mtd_info *mtd = nand_to_mtd(chip);
3645         int eccsize = chip->ecc.size;
3646         int eccbytes = chip->ecc.bytes;
3647         uint8_t *oob = chip->oob_poi;
3648         int steps, size, ret;
3649
3650         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3651         if (ret)
3652                 return ret;
3653
3654         for (steps = chip->ecc.steps; steps > 0; steps--) {
3655                 ret = nand_write_data_op(chip, buf, eccsize, false);
3656                 if (ret)
3657                         return ret;
3658
3659                 buf += eccsize;
3660
3661                 if (chip->ecc.prepad) {
3662                         ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3663                                                  false);
3664                         if (ret)
3665                                 return ret;
3666
3667                         oob += chip->ecc.prepad;
3668                 }
3669
3670                 ret = nand_write_data_op(chip, oob, eccbytes, false);
3671                 if (ret)
3672                         return ret;
3673
3674                 oob += eccbytes;
3675
3676                 if (chip->ecc.postpad) {
3677                         ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3678                                                  false);
3679                         if (ret)
3680                                 return ret;
3681
3682                         oob += chip->ecc.postpad;
3683                 }
3684         }
3685
3686         size = mtd->oobsize - (oob - chip->oob_poi);
3687         if (size) {
3688                 ret = nand_write_data_op(chip, oob, size, false);
3689                 if (ret)
3690                         return ret;
3691         }
3692
3693         return nand_prog_page_end_op(chip);
3694 }
3695 /**
3696  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3697  * @chip: nand chip info structure
3698  * @buf: data buffer
3699  * @oob_required: must write chip->oob_poi to OOB
3700  * @page: page number to write
3701  */
3702 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3703                                  int oob_required, int page)
3704 {
3705         struct mtd_info *mtd = nand_to_mtd(chip);
3706         int i, eccsize = chip->ecc.size, ret;
3707         int eccbytes = chip->ecc.bytes;
3708         int eccsteps = chip->ecc.steps;
3709         uint8_t *ecc_calc = chip->ecc.calc_buf;
3710         const uint8_t *p = buf;
3711
3712         /* Software ECC calculation */
3713         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3714                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3715
3716         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3717                                          chip->ecc.total);
3718         if (ret)
3719                 return ret;
3720
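             /*
              * The calculated ECC bytes now sit in chip->oob_poi, so a raw
              * write with oob_required set programs data and ECC in one go.
              */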
3721         return chip->ecc.write_page_raw(chip, buf, 1, page);
3722 }
3723
3724 /**
3725  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3726  * @chip: nand chip info structure
3727  * @buf: data buffer
3728  * @oob_required: must write chip->oob_poi to OOB
3729  * @page: page number to write
3730  */
3731 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3732                                  int oob_required, int page)
3733 {
3734         struct mtd_info *mtd = nand_to_mtd(chip);
3735         int i, eccsize = chip->ecc.size, ret;
3736         int eccbytes = chip->ecc.bytes;
3737         int eccsteps = chip->ecc.steps;
3738         uint8_t *ecc_calc = chip->ecc.calc_buf;
3739         const uint8_t *p = buf;
3740
3741         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3742         if (ret)
3743                 return ret;
3744
3745         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3746                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3747
3748                 ret = nand_write_data_op(chip, p, eccsize, false);
3749                 if (ret)
3750                         return ret;
3751
3752                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3753         }
3754
3755         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3756                                          chip->ecc.total);
3757         if (ret)
3758                 return ret;
3759
3760         ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3761         if (ret)
3762                 return ret;
3763
3764         return nand_prog_page_end_op(chip);
3765 }
3766
3767
3768 /**
3769  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3770  * @chip:       nand chip info structure
3771  * @offset:     column address of subpage within the page
3772  * @data_len:   data length
3773  * @buf:        data buffer
3774  * @oob_required: must write chip->oob_poi to OOB
3775  * @page: page number to write
3776  */
3777 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
3778                                     uint32_t data_len, const uint8_t *buf,
3779                                     int oob_required, int page)
3780 {
3781         struct mtd_info *mtd = nand_to_mtd(chip);
3782         uint8_t *oob_buf  = chip->oob_poi;
3783         uint8_t *ecc_calc = chip->ecc.calc_buf;
3784         int ecc_size      = chip->ecc.size;
3785         int ecc_bytes     = chip->ecc.bytes;
3786         int ecc_steps     = chip->ecc.steps;
3787         uint32_t start_step = offset / ecc_size;
3788         uint32_t end_step   = (offset + data_len - 1) / ecc_size;
3789         int oob_bytes       = mtd->oobsize / ecc_steps;
3790         int step, ret;
3791
3792         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3793         if (ret)
3794                 return ret;
3795
3796         for (step = 0; step < ecc_steps; step++) {
3797                 /* configure controller for WRITE access */
3798                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3799
3800                 /* write data (untouched subpages already masked by 0xFF) */
3801                 ret = nand_write_data_op(chip, buf, ecc_size, false);
3802                 if (ret)
3803                         return ret;
3804
3805                 /* mask ECC of un-touched subpages by padding 0xFF */
3806                 if ((step < start_step) || (step > end_step))
3807                         memset(ecc_calc, 0xff, ecc_bytes);
3808                 else
3809                         chip->ecc.calculate(chip, buf, ecc_calc);
3810
3811                 /* mask OOB of un-touched subpages by padding 0xFF */
3812                 /* if oob_required, preserve OOB metadata of written subpage */
3813                 if (!oob_required || (step < start_step) || (step > end_step))
3814                         memset(oob_buf, 0xff, oob_bytes);
3815
3816                 buf += ecc_size;
3817                 ecc_calc += ecc_bytes;
3818                 oob_buf  += oob_bytes;
3819         }
3820
3821         /* copy calculated ECC for the whole page to chip->oob_poi */
3822         /* this includes the masked value (0xFF) for unwritten subpages */
3823         ecc_calc = chip->ecc.calc_buf;
3824         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3825                                          chip->ecc.total);
3826         if (ret)
3827                 return ret;
3828
3829         /* write OOB buffer to NAND device */
3830         ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3831         if (ret)
3832                 return ret;
3833
3834         return nand_prog_page_end_op(chip);
3835 }
3836
3837
3838 /**
3839  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3840  * @chip: nand chip info structure
3841  * @buf: data buffer
3842  * @oob_required: must write chip->oob_poi to OOB
3843  * @page: page number to write
3844  *
3845  * The hw generator calculates the error syndrome automatically. Therefore we
3846  * need a special oob layout and handling.
3847  */
3848 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
3849                                     int oob_required, int page)
3850 {
3851         struct mtd_info *mtd = nand_to_mtd(chip);
3852         int i, eccsize = chip->ecc.size;
3853         int eccbytes = chip->ecc.bytes;
3854         int eccsteps = chip->ecc.steps;
3855         const uint8_t *p = buf;
3856         uint8_t *oob = chip->oob_poi;
3857         int ret;
3858
3859         ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3860         if (ret)
3861                 return ret;
3862
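             /*
              * Program each step as data | prepad | ECC | postpad so the
              * layout matches what the syndrome-based read path expects.
              */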
3863         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3864                 chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3865
3866                 ret = nand_write_data_op(chip, p, eccsize, false);
3867                 if (ret)
3868                         return ret;
3869
3870                 if (chip->ecc.prepad) {
3871                         ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3872                                                  false);
3873                         if (ret)
3874                                 return ret;
3875
3876                         oob += chip->ecc.prepad;
3877                 }
3878
3879                 chip->ecc.calculate(chip, p, oob);
3880
3881                 ret = nand_write_data_op(chip, oob, eccbytes, false);
3882                 if (ret)
3883                         return ret;
3884
3885                 oob += eccbytes;
3886
3887                 if (chip->ecc.postpad) {
3888                         ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3889                                                  false);
3890                         if (ret)
3891                                 return ret;
3892
3893                         oob += chip->ecc.postpad;
3894                 }
3895         }
3896
3897         /* Calculate remaining oob bytes */
3898         i = mtd->oobsize - (oob - chip->oob_poi);
3899         if (i) {
3900                 ret = nand_write_data_op(chip, oob, i, false);
3901                 if (ret)
3902                         return ret;
3903         }
3904
3905         return nand_prog_page_end_op(chip);
3906 }
3907
3908 /**
3909  * nand_write_page - write one page
3910  * @chip: NAND chip descriptor
3911  * @offset: address offset within the page
3912  * @data_len: length of actual data to be written
3913  * @buf: the data to write
3914  * @oob_required: must write chip->oob_poi to OOB
3915  * @page: page number to write
3916  * @raw: use _raw version of write_page
3917  */
3918 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3919                            int data_len, const uint8_t *buf, int oob_required,
3920                            int page, int raw)
3921 {
3922         struct mtd_info *mtd = nand_to_mtd(chip);
3923         int status, subpage;
3924
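             /*
              * Use the subpage path only when the controller provides one and
              * the request does not cover a whole page (non-zero column
              * offset or a length shorter than the page size).
              */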
3925         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3926                 chip->ecc.write_subpage)
3927                 subpage = offset || (data_len < mtd->writesize);
3928         else
3929                 subpage = 0;
3930
3931         if (unlikely(raw))
3932                 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3933                                                   page);
3934         else if (subpage)
3935                 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3936                                                  oob_required, page);
3937         else
3938                 status = chip->ecc.write_page(chip, buf, oob_required, page);
3939
3940         if (status < 0)
3941                 return status;
3942
3943         return 0;
3944 }
3945
3946 #define NOTALIGNED(x)   (((x) & (chip->subpagesize - 1)) != 0)
3947
3948 /**
3949  * nand_do_write_ops - [INTERN] NAND write with ECC
3950  * @chip: NAND chip object
3951  * @to: offset to write to
3952  * @ops: oob operations description structure
3953  *
3954  * NAND write with ECC.
3955  */
3956 static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
3957                              struct mtd_oob_ops *ops)
3958 {
3959         struct mtd_info *mtd = nand_to_mtd(chip);
3960         int chipnr, realpage, page, column;
3961         uint32_t writelen = ops->len;
3962
3963         uint32_t oobwritelen = ops->ooblen;
3964         uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3965
3966         uint8_t *oob = ops->oobbuf;
3967         uint8_t *buf = ops->datbuf;
3968         int ret;
3969         int oob_required = oob ? 1 : 0;
3970
3971         ops->retlen = 0;
3972         if (!writelen)
3973                 return 0;
3974
3975         /* Reject writes that are not page aligned */
3976         if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
3977                 pr_notice("%s: attempt to write non page aligned data\n",
3978                            __func__);
3979                 return -EINVAL;
3980         }
3981
3982         column = to & (mtd->writesize - 1);
3983
3984         chipnr = (int)(to >> chip->chip_shift);
3985         nand_select_target(chip, chipnr);
3986
3987         /* Check if it is write protected */
3988         if (nand_check_wp(chip)) {
3989                 ret = -EIO;
3990                 goto err_out;
3991         }
3992
3993         realpage = (int)(to >> chip->page_shift);
3994         page = realpage & chip->pagemask;
3995
3996         /* Invalidate the page cache, when we write to the cached page */
3997         if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
3998             ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
3999                 chip->pagecache.page = -1;
4000
4001         /* Don't allow multipage oob writes with offset */
4002         if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4003                 ret = -EINVAL;
4004                 goto err_out;
4005         }
4006
4007         while (1) {
4008                 int bytes = mtd->writesize;
4009                 uint8_t *wbuf = buf;
4010                 int use_bounce_buf;
4011                 int part_pagewr = (column || writelen < mtd->writesize);
4012
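                     /*
                      * Partial-page writes are assembled in the bounce
                      * buffer, padded with 0xFF so untouched bytes stay in
                      * the erased state; DMA-driven controllers also use it
                      * when the caller's buffer cannot be used for DMA
                      * directly.
                      */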
4013                 if (part_pagewr)
4014                         use_bounce_buf = 1;
4015                 else if (chip->options & NAND_USES_DMA)
4016                         use_bounce_buf = !virt_addr_valid(buf) ||
4017                                          !IS_ALIGNED((unsigned long)buf,
4018                                                      chip->buf_align);
4019                 else
4020                         use_bounce_buf = 0;
4021
4022                 /*
4023                  * Copy the data from the initial buffer when doing partial page
4024                  * writes or when a bounce buffer is required.
4025                  */
4026                 if (use_bounce_buf) {
4027                         pr_debug("%s: using write bounce buffer for buf@%p\n",
4028                                          __func__, buf);
4029                         if (part_pagewr)
4030                                 bytes = min_t(int, bytes - column, writelen);
4031                         wbuf = nand_get_data_buf(chip);
4032                         memset(wbuf, 0xff, mtd->writesize);
4033                         memcpy(&wbuf[column], buf, bytes);
4034                 }
4035
4036                 if (unlikely(oob)) {
4037                         size_t len = min(oobwritelen, oobmaxlen);
4038                         oob = nand_fill_oob(chip, oob, len, ops);
4039                         oobwritelen -= len;
4040                 } else {
4041                         /* We still need to erase leftover OOB data */
4042                         memset(chip->oob_poi, 0xff, mtd->oobsize);
4043                 }
4044
4045                 ret = nand_write_page(chip, column, bytes, wbuf,
4046                                       oob_required, page,
4047                                       (ops->mode == MTD_OPS_RAW));
4048                 if (ret)
4049                         break;
4050
4051                 writelen -= bytes;
4052                 if (!writelen)
4053                         break;
4054
4055                 column = 0;
4056                 buf += bytes;
4057                 realpage++;
4058
4059                 page = realpage & chip->pagemask;
4060                 /* Check if we cross a chip boundary */
4061                 if (!page) {
4062                         chipnr++;
4063                         nand_deselect_target(chip);
4064                         nand_select_target(chip, chipnr);
4065                 }
4066         }
4067
4068         ops->retlen = ops->len - writelen;
4069         if (unlikely(oob))
4070                 ops->oobretlen = ops->ooblen;
4071
4072 err_out:
4073         nand_deselect_target(chip);
4074         return ret;
4075 }
4076
4077 /**
4078  * panic_nand_write - [MTD Interface] NAND write with ECC
4079  * @mtd: MTD device structure
4080  * @to: offset to write to
4081  * @len: number of bytes to write
4082  * @retlen: pointer to variable to store the number of written bytes
4083  * @buf: the data to write
4084  *
4085  * NAND write with ECC. Used when performing writes in interrupt context, this
4086  * may for example be called by mtdoops when writing an oops while in panic.
4087  */
4088 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4089                             size_t *retlen, const uint8_t *buf)
4090 {
4091         struct nand_chip *chip = mtd_to_nand(mtd);
4092         int chipnr = (int)(to >> chip->chip_shift);
4093         struct mtd_oob_ops ops;
4094         int ret;
4095
4096         nand_select_target(chip, chipnr);
4097
4098         /* Wait for the device to get ready */
4099         panic_nand_wait(chip, 400);
4100
4101         memset(&ops, 0, sizeof(ops));
4102         ops.len = len;
4103         ops.datbuf = (uint8_t *)buf;
4104         ops.mode = MTD_OPS_PLACE_OOB;
4105
4106         ret = nand_do_write_ops(chip, to, &ops);
4107
4108         *retlen = ops.retlen;
4109         return ret;
4110 }
4111
4112 /**
4113  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4114  * @mtd: MTD device structure
4115  * @to: offset to write to
4116  * @ops: oob operation description structure
4117  */
4118 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4119                           struct mtd_oob_ops *ops)
4120 {
4121         struct nand_chip *chip = mtd_to_nand(mtd);
4122         int ret;
4123
4124         ops->retlen = 0;
4125
4126         ret = nand_get_device(chip);
4127         if (ret)
4128                 return ret;
4129
4130         switch (ops->mode) {
4131         case MTD_OPS_PLACE_OOB:
4132         case MTD_OPS_AUTO_OOB:
4133         case MTD_OPS_RAW:
4134                 break;
4135
4136         default:
4137                 goto out;
4138         }
4139
4140         if (!ops->datbuf)
4141                 ret = nand_do_write_oob(chip, to, ops);
4142         else
4143                 ret = nand_do_write_ops(chip, to, ops);
4144
4145 out:
4146         nand_release_device(chip);
4147         return ret;
4148 }
4149
4150 /**
4151  * nand_erase - [MTD Interface] erase block(s)
4152  * @mtd: MTD device structure
4153  * @instr: erase instruction
4154  *
4155  * Erase one or more blocks.
4156  */
4157 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4158 {
4159         return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4160 }
4161
4162 /**
4163  * nand_erase_nand - [INTERN] erase block(s)
4164  * @chip: NAND chip object
4165  * @instr: erase instruction
4166  * @allowbbt: allow erasing the bbt area
4167  *
4168  * Erase one or more blocks.
4169  */
4170 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4171                     int allowbbt)
4172 {
4173         int page, pages_per_block, ret, chipnr;
4174         loff_t len;
4175
4176         pr_debug("%s: start = 0x%012llx, len = %llu\n",
4177                         __func__, (unsigned long long)instr->addr,
4178                         (unsigned long long)instr->len);
4179
4180         if (check_offs_len(chip, instr->addr, instr->len))
4181                 return -EINVAL;
4182
4183         /* Grab the lock and see if the device is available */
4184         ret = nand_get_device(chip);
4185         if (ret)
4186                 return ret;
4187
4188         /* Shift to get first page */
4189         page = (int)(instr->addr >> chip->page_shift);
4190         chipnr = (int)(instr->addr >> chip->chip_shift);
4191
4192         /* Calculate pages in each block */
4193         pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4194
4195         /* Select the NAND device */
4196         nand_select_target(chip, chipnr);
4197
4198         /* Check if it is write protected */
4199         if (nand_check_wp(chip)) {
4200                 pr_debug("%s: device is write protected!\n",
4201                                 __func__);
4202                 ret = -EIO;
4203                 goto erase_exit;
4204         }
4205
4206         /* Loop through the pages */
4207         len = instr->len;
4208
4209         while (len) {
4210                 /* Check if we have a bad block; we do not erase bad blocks! */
4211                 if (nand_block_checkbad(chip, ((loff_t) page) <<
4212                                         chip->page_shift, allowbbt)) {
4213                         pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4214                                     __func__, page);
4215                         ret = -EIO;
4216                         goto erase_exit;
4217                 }
4218
4219                 /*
4220                  * Invalidate the page cache, if we erase the block which
4221                  * contains the current cached page.
4222                  */
4223                 if (page <= chip->pagecache.page && chip->pagecache.page <
4224                     (page + pages_per_block))
4225                         chip->pagecache.page = -1;
4226
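                     /*
                      * nand_erase_op() takes an eraseblock index, so convert
                      * the per-chip page number into a block number first.
                      */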
4227                 ret = nand_erase_op(chip, (page & chip->pagemask) >>
4228                                     (chip->phys_erase_shift - chip->page_shift));
4229                 if (ret) {
4230                         pr_debug("%s: failed erase, page 0x%08x\n",
4231                                         __func__, page);
4232                         instr->fail_addr =
4233                                 ((loff_t)page << chip->page_shift);
4234                         goto erase_exit;
4235                 }
4236
4237                 /* Increment page address and decrement length */
4238                 len -= (1ULL << chip->phys_erase_shift);
4239                 page += pages_per_block;
4240
4241                 /* Check if we cross a chip boundary */
4242                 if (len && !(page & chip->pagemask)) {
4243                         chipnr++;
4244                         nand_deselect_target(chip);
4245                         nand_select_target(chip, chipnr);
4246                 }
4247         }
4248
4249         ret = 0;
4250 erase_exit:
4251
4252         /* Deselect and wake up anyone waiting on the device */
4253         nand_deselect_target(chip);
4254         nand_release_device(chip);
4255
4256         /* Return more or less happy */
4257         return ret;
4258 }
4259
4260 /**
4261  * nand_sync - [MTD Interface] sync
4262  * @mtd: MTD device structure
4263  *
4264  * Sync is actually a wait-for-chip-ready function.
4265  */
4266 static void nand_sync(struct mtd_info *mtd)
4267 {
4268         struct nand_chip *chip = mtd_to_nand(mtd);
4269
4270         pr_debug("%s: called\n", __func__);
4271
4272         /* Grab the lock and see if the device is available */
4273         WARN_ON(nand_get_device(chip));
4274         /* Release it and go back */
4275         nand_release_device(chip);
4276 }
4277
4278 /**
4279  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4280  * @mtd: MTD device structure
4281  * @offs: offset relative to mtd start
4282  */
4283 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4284 {
4285         struct nand_chip *chip = mtd_to_nand(mtd);
4286         int chipnr = (int)(offs >> chip->chip_shift);
4287         int ret;
4288
4289         /* Select the NAND device */
4290         ret = nand_get_device(chip);
4291         if (ret)
4292                 return ret;
4293
4294         nand_select_target(chip, chipnr);
4295
4296         ret = nand_block_checkbad(chip, offs, 0);
4297
4298         nand_deselect_target(chip);
4299         nand_release_device(chip);
4300
4301         return ret;
4302 }
4303
4304 /**
4305  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4306  * @mtd: MTD device structure
4307  * @ofs: offset relative to mtd start
4308  */
4309 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4310 {
4311         int ret;
4312
4313         ret = nand_block_isbad(mtd, ofs);
4314         if (ret) {
4315                 /* If it was bad already, return success and do nothing */
4316                 if (ret > 0)
4317                         return 0;
4318                 return ret;
4319         }
4320
4321         return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4322 }
4323
4324 /**
4325  * nand_suspend - [MTD Interface] Suspend the NAND flash
4326  * @mtd: MTD device structure
4327  *
4328  * Returns 0 for success or negative error code otherwise.
4329  */
4330 static int nand_suspend(struct mtd_info *mtd)
4331 {
4332         struct nand_chip *chip = mtd_to_nand(mtd);
4333         int ret = 0;
4334
4335         mutex_lock(&chip->lock);
4336         if (chip->ops.suspend)
4337                 ret = chip->ops.suspend(chip);
4338         if (!ret)
4339                 chip->suspended = 1;
4340         mutex_unlock(&chip->lock);
4341
4342         return ret;
4343 }
4344
4345 /**
4346  * nand_resume - [MTD Interface] Resume the NAND flash
4347  * @mtd: MTD device structure
4348  */
4349 static void nand_resume(struct mtd_info *mtd)
4350 {
4351         struct nand_chip *chip = mtd_to_nand(mtd);
4352
4353         mutex_lock(&chip->lock);
4354         if (chip->suspended) {
4355                 if (chip->ops.resume)
4356                         chip->ops.resume(chip);
4357                 chip->suspended = 0;
4358         } else {
4359                 pr_err("%s called for a chip which is not in suspended state\n",
4360                         __func__);
4361         }
4362         mutex_unlock(&chip->lock);
4363 }
4364
4365 /**
4366  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4367  *                 prevent further operations
4368  * @mtd: MTD device structure
4369  */
4370 static void nand_shutdown(struct mtd_info *mtd)
4371 {
4372         nand_suspend(mtd);
4373 }
4374
4375 /**
4376  * nand_lock - [MTD Interface] Lock the NAND flash
4377  * @mtd: MTD device structure
4378  * @ofs: offset byte address
4379  * @len: number of bytes to lock (must be a multiple of block/page size)
4380  */
4381 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4382 {
4383         struct nand_chip *chip = mtd_to_nand(mtd);
4384
4385         if (!chip->ops.lock_area)
4386                 return -ENOTSUPP;
4387
4388         return chip->ops.lock_area(chip, ofs, len);
4389 }
4390
4391 /**
4392  * nand_unlock - [MTD Interface] Unlock the NAND flash
4393  * @mtd: MTD device structure
4394  * @ofs: offset byte address
4395  * @len: number of bytes to unlock (must be a multiple of block/page size)
4396  */
4397 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4398 {
4399         struct nand_chip *chip = mtd_to_nand(mtd);
4400
4401         if (!chip->ops.unlock_area)
4402                 return -ENOTSUPP;
4403
4404         return chip->ops.unlock_area(chip, ofs, len);
4405 }
4406
4407 /* Set default functions */
4408 static void nand_set_defaults(struct nand_chip *chip)
4409 {
4410         /* If no controller is provided, use the dummy, legacy one. */
4411         if (!chip->controller) {
4412                 chip->controller = &chip->legacy.dummy_controller;
4413                 nand_controller_init(chip->controller);
4414         }
4415
4416         nand_legacy_set_defaults(chip);
4417
4418         if (!chip->buf_align)
4419                 chip->buf_align = 1;
4420 }
4421
4422 /* Sanitize ONFI strings so we can safely print them */
4423 void sanitize_string(uint8_t *s, size_t len)
4424 {
4425         ssize_t i;
4426
4427         /* Null terminate */
4428         s[len - 1] = 0;
4429
4430         /* Remove non printable chars */
4431         for (i = 0; i < len - 1; i++) {
4432                 if (s[i] < ' ' || s[i] > 127)
4433                         s[i] = '?';
4434         }
4435
4436         /* Remove trailing spaces */
4437         strim(s);
4438 }
4439
4440 /*
4441  * nand_id_has_period - Check if an ID string has a given wraparound period
4442  * @id_data: the ID string
4443  * @arrlen: the length of the @id_data array
4444  * @period: the period of repetition
4445  *
4446  * Check if an ID string is repeated within a given sequence of bytes at a
4447  * specific repetition interval, @period (e.g., {0x20,0x01,0x7F,0x20} has a
4448  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4449  * if the repetition has a period of @period; otherwise, returns zero.
4450  */
4451 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4452 {
4453         int i, j;
4454         for (i = 0; i < period; i++)
4455                 for (j = i + period; j < arrlen; j += period)
4456                         if (id_data[i] != id_data[j])
4457                                 return 0;
4458         return 1;
4459 }
4460
4461 /*
4462  * nand_id_len - Get the length of an ID string returned by CMD_READID
4463  * @id_data: the ID string
4464  * @arrlen: the length of the @id_data array
4465  *
4466  * Returns the length of the ID string, according to known wraparound/trailing
4467  * zero patterns. If no pattern exists, returns the length of the array.
4468  */
4469 static int nand_id_len(u8 *id_data, int arrlen)
4470 {
4471         int last_nonzero, period;
4472
4473         /* Find last non-zero byte */
4474         for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4475                 if (id_data[last_nonzero])
4476                         break;
4477
4478         /* All zeros */
4479         if (last_nonzero < 0)
4480                 return 0;
4481
4482         /* Calculate wraparound period */
4483         for (period = 1; period < arrlen; period++)
4484                 if (nand_id_has_period(id_data, arrlen, period))
4485                         break;
4486
4487         /* There's a repeated pattern */
4488         if (period < arrlen)
4489                 return period;
4490
4491         /* There are trailing zeros */
4492         if (last_nonzero < arrlen - 1)
4493                 return last_nonzero + 1;
4494
4495         /* No pattern detected */
4496         return arrlen;
4497 }
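
/*
 * Illustration (not part of the driver), assuming the usual 8-byte ID buffer:
 * for the ID string {0x20, 0x01, 0x7F, 0x20, 0x01, 0x7F, 0x20, 0x01} every
 * byte repeats three positions later, so nand_id_has_period(id, 8, 3) returns
 * 1 while smaller periods fail, and nand_id_len(id, 8) reports a length of 3.
 * For {0xEC, 0xD3, 0x51, 0x95, 0x58, 0x00, 0x00, 0x00} no period matches, so
 * the trailing zeros are dropped and the reported length is 5.
 */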
4498
4499 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
4500 static int nand_get_bits_per_cell(u8 cellinfo)
4501 {
4502         int bits;
4503
4504         bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4505         bits >>= NAND_CI_CELLTYPE_SHIFT;
4506         return bits + 1;
4507 }
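
/*
 * Illustration (not part of the driver): with the usual definitions of
 * NAND_CI_CELLTYPE_MSK (0x0C) and NAND_CI_CELLTYPE_SHIFT (2), cell-type bits
 * of 00 in the 3rd ID byte decode to 1 bit per cell (SLC) and 01 to 2 bits
 * per cell (MLC).
 */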
4508
4509 /*
4510  * Many newer NAND chips share similar device ID codes, which represent the size
4511  * of the chip. The rest of the parameters must be decoded according to generic or
4512  * manufacturer-specific "extended ID" decoding patterns.
4513  */
4514 void nand_decode_ext_id(struct nand_chip *chip)
4515 {
4516         struct nand_memory_organization *memorg;
4517         struct mtd_info *mtd = nand_to_mtd(chip);
4518         int extid;
4519         u8 *id_data = chip->id.data;
4520
4521         memorg = nanddev_get_memorg(&chip->base);
4522
4523         /* The 3rd id byte holds MLC / multichip data */
4524         memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4525         /* The 4th id byte is the important one */
4526         extid = id_data[3];
4527
4528         /* Calc pagesize */
4529         memorg->pagesize = 1024 << (extid & 0x03);
4530         mtd->writesize = memorg->pagesize;
4531         extid >>= 2;
4532         /* Calc oobsize */
4533         memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4534         mtd->oobsize = memorg->oobsize;
4535         extid >>= 2;
4536         /* Calc blocksize. Blocksize is in multiples of 64KiB */
4537         memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
4538                                        memorg->pagesize;
4539         mtd->erasesize = (64 * 1024) << (extid & 0x03);
4540         extid >>= 2;
4541         /* Get buswidth information */
4542         if (extid & 0x1)
4543                 chip->options |= NAND_BUSWIDTH_16;
4544 }
4545 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
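
/*
 * Worked example (illustrative only): a 4th ID byte of 0x95 decodes above as
 * pagesize = 1024 << (0x95 & 0x3) = 2048,
 * oobsize = (8 << (0x25 & 0x1)) * (2048 >> 9) = 16 * 4 = 64,
 * blocksize = (64 * 1024) << (0x09 & 0x3) = 128KiB, i.e. 64 pages per block,
 * and the remaining bit (0x02 & 0x1 = 0) selects an 8-bit bus.
 */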
4546
4547 /*
4548  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4549  * decodes a matching ID table entry and assigns the MTD size parameters for
4550  * the chip.
4551  */
4552 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4553 {
4554         struct mtd_info *mtd = nand_to_mtd(chip);
4555         struct nand_memory_organization *memorg;
4556
4557         memorg = nanddev_get_memorg(&chip->base);
4558
4559         memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4560         mtd->erasesize = type->erasesize;
4561         memorg->pagesize = type->pagesize;
4562         mtd->writesize = memorg->pagesize;
4563         memorg->oobsize = memorg->pagesize / 32;
4564         mtd->oobsize = memorg->oobsize;
4565
4566         /* All legacy ID NAND are small-page, SLC */
4567         memorg->bits_per_cell = 1;
4568 }
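
/*
 * Illustration (not part of the driver): a legacy entry with a 512-byte page
 * and a 16KiB erase block yields 32 pages per eraseblock and a 512 / 32 = 16
 * byte OOB area.
 */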
4569
4570 /*
4571  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4572  * heuristic patterns using various detected parameters (e.g., manufacturer,
4573  * page size, cell-type information).
4574  */
4575 static void nand_decode_bbm_options(struct nand_chip *chip)
4576 {
4577         struct mtd_info *mtd = nand_to_mtd(chip);
4578
4579         /* Set the bad block position */
4580         if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4581                 chip->badblockpos = NAND_BBM_POS_LARGE;
4582         else
4583                 chip->badblockpos = NAND_BBM_POS_SMALL;
4584 }
4585
4586 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4587 {
4588         return type->id_len;
4589 }
4590
4591 static bool find_full_id_nand(struct nand_chip *chip,
4592                               struct nand_flash_dev *type)
4593 {
4594         struct nand_device *base = &chip->base;
4595         struct nand_ecc_props requirements;
4596         struct mtd_info *mtd = nand_to_mtd(chip);
4597         struct nand_memory_organization *memorg;
4598         u8 *id_data = chip->id.data;
4599
4600         memorg = nanddev_get_memorg(&chip->base);
4601
4602         if (!strncmp(type->id, id_data, type->id_len)) {
4603                 memorg->pagesize = type->pagesize;
4604                 mtd->writesize = memorg->pagesize;
4605                 memorg->pages_per_eraseblock = type->erasesize /
4606                                                type->pagesize;
4607                 mtd->erasesize = type->erasesize;
4608                 memorg->oobsize = type->oobsize;
4609                 mtd->oobsize = memorg->oobsize;
4610
4611                 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4612                 memorg->eraseblocks_per_lun =
4613                         DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4614                                            memorg->pagesize *
4615                                            memorg->pages_per_eraseblock);
4616                 chip->options |= type->options;
4617                 requirements.strength = NAND_ECC_STRENGTH(type);
4618                 requirements.step_size = NAND_ECC_STEP(type);
4619                 nanddev_set_ecc_requirements(base, &requirements);
4620
4621                 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4622                 if (!chip->parameters.model)
4623                         return false;
4624
4625                 return true;
4626         }
4627         return false;
4628 }
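
/*
 * Illustration (not part of the driver): a full-id entry describing a 256 MiB
 * chip with 2048-byte pages and 64 pages per eraseblock gives
 * (256 << 20) / (2048 * 64) = 2048 eraseblocks per LUN.
 */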
4629
4630 /*
4631  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4632  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4633  * table.
4634  */
4635 static void nand_manufacturer_detect(struct nand_chip *chip)
4636 {
4637         /*
4638          * Try manufacturer detection if available and use
4639          * nand_decode_ext_id() otherwise.
4640          */
4641         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4642             chip->manufacturer.desc->ops->detect) {
4643                 struct nand_memory_organization *memorg;
4644
4645                 memorg = nanddev_get_memorg(&chip->base);
4646
4647                 /* The 3rd id byte holds MLC / multichip data */
4648                 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4649                 chip->manufacturer.desc->ops->detect(chip);
4650         } else {
4651                 nand_decode_ext_id(chip);
4652         }
4653 }
4654
4655 /*
4656  * Manufacturer initialization. This function is called for all NANDs including
4657  * ONFI and JEDEC compliant ones.
4658  * Manufacturer drivers should put all their specific initialization code in
4659  * their ->init() hook.
4660  */
4661 static int nand_manufacturer_init(struct nand_chip *chip)
4662 {
4663         if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4664             !chip->manufacturer.desc->ops->init)
4665                 return 0;
4666
4667         return chip->manufacturer.desc->ops->init(chip);
4668 }
4669
4670 /*
4671  * Manufacturer cleanup. This function is called for all NANDs including
4672  * ONFI and JEDEC compliant ones.
4673  * Manufacturer drivers should put all their specific cleanup code in their
4674  * ->cleanup() hook.
4675  */
4676 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4677 {
4678         /* Release manufacturer private data */
4679         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4680             chip->manufacturer.desc->ops->cleanup)
4681                 chip->manufacturer.desc->ops->cleanup(chip);
4682 }
4683
4684 static const char *
4685 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4686 {
4687         return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4688 }
4689
4690 /*
4691  * Get the flash and manufacturer id and lookup if the type is supported.
4692  */
4693 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4694 {
4695         const struct nand_manufacturer_desc *manufacturer_desc;
4696         struct mtd_info *mtd = nand_to_mtd(chip);
4697         struct nand_memory_organization *memorg;
4698         int busw, ret;
4699         u8 *id_data = chip->id.data;
4700         u8 maf_id, dev_id;
4701         u64 targetsize;
4702
4703         /*
4704          * Let's start by initializing memorg fields that might be left
4705          * unassigned by the ID-based detection logic.
4706          */
4707         memorg = nanddev_get_memorg(&chip->base);
4708         memorg->planes_per_lun = 1;
4709         memorg->luns_per_target = 1;
4710
4711         /*
4712          * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4713          * after power-up.
4714          */
4715         ret = nand_reset(chip, 0);
4716         if (ret)
4717                 return ret;
4718
4719         /* Select the device */
4720         nand_select_target(chip, 0);
4721
4722         /* Send the command for reading device ID */
4723         ret = nand_readid_op(chip, 0, id_data, 2);
4724         if (ret)
4725                 return ret;
4726
4727         /* Read manufacturer and device IDs */
4728         maf_id = id_data[0];
4729         dev_id = id_data[1];
4730
4731         /*
4732          * Try again to make sure, as on some systems bus-hold or other
4733          * interface concerns can cause random data to appear that looks
4734          * like a possibly credible NAND flash. If the two results do
4735          * not match, ignore the device completely.
4736          */
4737
4738         /* Read entire ID string */
4739         ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
4740         if (ret)
4741                 return ret;
4742
4743         if (id_data[0] != maf_id || id_data[1] != dev_id) {
4744                 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4745                         maf_id, dev_id, id_data[0], id_data[1]);
4746                 return -ENODEV;
4747         }
4748
4749         chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
4750
4751         /* Try to identify manufacturer */
4752         manufacturer_desc = nand_get_manufacturer_desc(maf_id);
4753         chip->manufacturer.desc = manufacturer_desc;
4754
4755         if (!type)
4756                 type = nand_flash_ids;
4757
4758         /*
4759          * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4760          * override it.
4761          * This is required to make sure the initial NAND bus width set by the
4762          * NAND controller driver is coherent with the real NAND bus width
4763          * (extracted by auto-detection code).
4764          */
4765         busw = chip->options & NAND_BUSWIDTH_16;
4766
4767         /*
4768          * The flag is only set (never cleared); reset it to its default value
4769          * before starting auto-detection.
4770          */
4771         chip->options &= ~NAND_BUSWIDTH_16;
4772
4773         for (; type->name != NULL; type++) {
4774                 if (is_full_id_nand(type)) {
4775                         if (find_full_id_nand(chip, type))
4776                                 goto ident_done;
4777                 } else if (dev_id == type->dev_id) {
4778                         break;
4779                 }
4780         }
4781
4782         if (!type->name || !type->pagesize) {
4783                 /* Check if the chip is ONFI compliant */
4784                 ret = nand_onfi_detect(chip);
4785                 if (ret < 0)
4786                         return ret;
4787                 else if (ret)
4788                         goto ident_done;
4789
4790                 /* Check if the chip is JEDEC compliant */
4791                 ret = nand_jedec_detect(chip);
4792                 if (ret < 0)
4793                         return ret;
4794                 else if (ret)
4795                         goto ident_done;
4796         }
4797
4798         if (!type->name)
4799                 return -ENODEV;
4800
4801         chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4802         if (!chip->parameters.model)
4803                 return -ENOMEM;
4804
4805         if (!type->pagesize)
4806                 nand_manufacturer_detect(chip);
4807         else
4808                 nand_decode_id(chip, type);
4809
4810         /* Get chip options */
4811         chip->options |= type->options;
4812
4813         memorg->eraseblocks_per_lun =
4814                         DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4815                                            memorg->pagesize *
4816                                            memorg->pages_per_eraseblock);
4817
4818 ident_done:
4819         if (!mtd->name)
4820                 mtd->name = chip->parameters.model;
4821
4822         if (chip->options & NAND_BUSWIDTH_AUTO) {
4823                 WARN_ON(busw & NAND_BUSWIDTH_16);
4824                 nand_set_defaults(chip);
4825         } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4826                 /*
4827                  * Check if the bus width is correct. Hardware drivers should
4828                  * set up the chip correctly!
4829                  */
4830                 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4831                         maf_id, dev_id);
4832                 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
4833                         mtd->name);
4834                 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4835                         (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4836                 ret = -EINVAL;
4837
4838                 goto free_detect_allocation;
4839         }
4840
4841         nand_decode_bbm_options(chip);
4842
4843         /* Calculate the address shift from the page size */
4844         chip->page_shift = ffs(mtd->writesize) - 1;
4845         /* Convert chipsize to number of pages per chip -1 */
4846         targetsize = nanddev_target_size(&chip->base);
4847         chip->pagemask = (targetsize >> chip->page_shift) - 1;
4848
4849         chip->bbt_erase_shift = chip->phys_erase_shift =
4850                 ffs(mtd->erasesize) - 1;
4851         if (targetsize & 0xffffffff)
4852                 chip->chip_shift = ffs((unsigned)targetsize) - 1;
4853         else {
4854                 chip->chip_shift = ffs((unsigned)(targetsize >> 32));
4855                 chip->chip_shift += 32 - 1;
4856         }
4857
4858         if (chip->chip_shift - chip->page_shift > 16)
4859                 chip->options |= NAND_ROW_ADDR_3;
4860
4861         chip->badblockbits = 8;
4862
4863         nand_legacy_adjust_cmdfunc(chip);
4864
4865         pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4866                 maf_id, dev_id);
4867         pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
4868                 chip->parameters.model);
4869         pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4870                 (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4871                 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4872         return 0;
4873
4874 free_detect_allocation:
4875         kfree(chip->parameters.model);
4876
4877         return ret;
4878 }
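
/*
 * Illustration (not part of the driver): for a 256 MiB target with a
 * 2048-byte page and a 128KiB erase block, the shifts computed above are
 * page_shift = 11, phys_erase_shift = 17 and chip_shift = 28, the page mask
 * is (256 MiB >> 11) - 1 = 0x1ffff, and since 28 - 11 > 16 the chip needs
 * three row address cycles (NAND_ROW_ADDR_3).
 */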
4879
4880 static enum nand_ecc_engine_type
4881 of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
4882 {
4883         enum nand_ecc_legacy_mode {
4884                 NAND_ECC_INVALID,
4885                 NAND_ECC_NONE,
4886                 NAND_ECC_SOFT,
4887                 NAND_ECC_SOFT_BCH,
4888                 NAND_ECC_HW,
4889                 NAND_ECC_HW_SYNDROME,
4890                 NAND_ECC_ON_DIE,
4891         };
4892         const char * const nand_ecc_legacy_modes[] = {
4893                 [NAND_ECC_NONE]         = "none",
4894                 [NAND_ECC_SOFT]         = "soft",
4895                 [NAND_ECC_SOFT_BCH]     = "soft_bch",
4896                 [NAND_ECC_HW]           = "hw",
4897                 [NAND_ECC_HW_SYNDROME]  = "hw_syndrome",
4898                 [NAND_ECC_ON_DIE]       = "on-die",
4899         };
4900         enum nand_ecc_legacy_mode eng_type;
4901         const char *pm;
4902         int err;
4903
4904         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4905         if (err)
4906                 return NAND_ECC_ENGINE_TYPE_INVALID;
4907
4908         for (eng_type = NAND_ECC_NONE;
4909              eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
4910                 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
4911                         switch (eng_type) {
4912                         case NAND_ECC_NONE:
4913                                 return NAND_ECC_ENGINE_TYPE_NONE;
4914                         case NAND_ECC_SOFT:
4915                         case NAND_ECC_SOFT_BCH:
4916                                 return NAND_ECC_ENGINE_TYPE_SOFT;
4917                         case NAND_ECC_HW:
4918                         case NAND_ECC_HW_SYNDROME:
4919                                 return NAND_ECC_ENGINE_TYPE_ON_HOST;
4920                         case NAND_ECC_ON_DIE:
4921                                 return NAND_ECC_ENGINE_TYPE_ON_DIE;
4922                         default:
4923                                 break;
4924                         }
4925                 }
4926         }
4927
4928         return NAND_ECC_ENGINE_TYPE_INVALID;
4929 }
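
/*
 * Illustration (not part of the driver): a legacy device tree property such
 * as
 *
 *	nand-ecc-mode = "hw";
 *
 * maps to NAND_ECC_ENGINE_TYPE_ON_HOST above, while "soft" and "soft_bch"
 * both map to NAND_ECC_ENGINE_TYPE_SOFT and are told apart by the algorithm
 * helper below.
 */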
4930
4931 static enum nand_ecc_placement
4932 of_get_rawnand_ecc_placement_legacy(struct device_node *np)
4933 {
4934         const char *pm;
4935         int err;
4936
4937         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4938         if (!err) {
4939                 if (!strcasecmp(pm, "hw_syndrome"))
4940                         return NAND_ECC_PLACEMENT_INTERLEAVED;
4941         }
4942
4943         return NAND_ECC_PLACEMENT_UNKNOWN;
4944 }
4945
4946 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
4947 {
4948         const char *pm;
4949         int err;
4950
4951         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4952         if (!err) {
4953                 if (!strcasecmp(pm, "soft"))
4954                         return NAND_ECC_ALGO_HAMMING;
4955                 else if (!strcasecmp(pm, "soft_bch"))
4956                         return NAND_ECC_ALGO_BCH;
4957         }
4958
4959         return NAND_ECC_ALGO_UNKNOWN;
4960 }
4961
4962 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
4963 {
4964         struct device_node *dn = nand_get_flash_node(chip);
4965         struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
4966
4967         if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
4968                 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
4969
4970         if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
4971                 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
4972
4973         if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
4974                 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
4975 }
4976
4977 static int of_get_nand_bus_width(struct device_node *np)
4978 {
4979         u32 val;
4980
4981         if (of_property_read_u32(np, "nand-bus-width", &val))
4982                 return 8;
4983
4984         switch (val) {
4985         case 8:
4986         case 16:
4987                 return val;
4988         default:
4989                 return -EIO;
4990         }
4991 }
4992
4993 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4994 {
4995         return of_property_read_bool(np, "nand-on-flash-bbt");
4996 }
4997
4998 static int rawnand_dt_init(struct nand_chip *chip)
4999 {
5000         struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
5001         struct device_node *dn = nand_get_flash_node(chip);
5002
5003         if (!dn)
5004                 return 0;
5005
5006         if (of_get_nand_bus_width(dn) == 16)
5007                 chip->options |= NAND_BUSWIDTH_16;
5008
5009         if (of_property_read_bool(dn, "nand-is-boot-medium"))
5010                 chip->options |= NAND_IS_BOOT_MEDIUM;
5011
5012         if (of_get_nand_on_flash_bbt(dn))
5013                 chip->bbt_options |= NAND_BBT_USE_FLASH;
5014
5015         of_get_nand_ecc_user_config(nand);
5016         of_get_nand_ecc_legacy_user_config(chip);
5017
5018         /*
5019          * If neither the user nor the NAND controller has requested a specific
5020          * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
5021          */
5022         nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
5023
5024         /*
5025          * Use the user-requested engine type if there is one; otherwise
5026          * default to the NAND controller choice, and finally fall back to
5027          * the raw NAND default one.
5028          */
5029         if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
5030                 chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
5031         if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5032                 chip->ecc.engine_type = nand->ecc.defaults.engine_type;
5033
5034         chip->ecc.placement = nand->ecc.user_conf.placement;
5035         chip->ecc.algo = nand->ecc.user_conf.algo;
5036         chip->ecc.strength = nand->ecc.user_conf.strength;
5037         chip->ecc.size = nand->ecc.user_conf.step_size;
5038
5039         return 0;
5040 }
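
/*
 * Hypothetical device tree fragment (illustrative only) exercising the
 * properties parsed above; the node name and unit address are made up, the
 * properties are the generic NAND bindings:
 *
 *	nand@0 {
 *		reg = <0>;
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *	};
 *
 * This sets NAND_BUSWIDTH_16, enables the flash-based BBT and requests an
 * on-host (controller-side) ECC engine.
 */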
5041
5042 /**
5043  * nand_scan_ident - Scan for the NAND device
5044  * @chip: NAND chip object
5045  * @maxchips: number of chips to scan for
5046  * @table: alternative NAND ID table
5047  *
5048  * This is the first phase of the normal nand_scan() function. It reads the
5049  * flash ID and sets up MTD fields accordingly.
5050  *
5051  * This helper used to be called directly from controller drivers that needed
5052  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
5053  * prevented dynamic allocations during this phase, which was inconvenient and
5054  * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5055  */
5056 static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5057                            struct nand_flash_dev *table)
5058 {
5059         struct mtd_info *mtd = nand_to_mtd(chip);
5060         struct nand_memory_organization *memorg;
5061         int nand_maf_id, nand_dev_id;
5062         unsigned int i;
5063         int ret;
5064
5065         memorg = nanddev_get_memorg(&chip->base);
5066
5067         /* Assume all dies are deselected when we enter nand_scan_ident(). */
5068         chip->cur_cs = -1;
5069
5070         mutex_init(&chip->lock);
5071
5072         /* Enforce the right timings for reset/detection */
5073         chip->current_interface_config = nand_get_reset_interface_config();
5074
5075         ret = rawnand_dt_init(chip);
5076         if (ret)
5077                 return ret;
5078
5079         if (!mtd->name && mtd->dev.parent)
5080                 mtd->name = dev_name(mtd->dev.parent);
5081
5082         /* Set the default functions */
5083         nand_set_defaults(chip);
5084
5085         ret = nand_legacy_check_hooks(chip);
5086         if (ret)
5087                 return ret;
5088
5089         memorg->ntargets = maxchips;
5090
5091         /* Read the flash type */
5092         ret = nand_detect(chip, table);
5093         if (ret) {
5094                 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5095                         pr_warn("No NAND device found\n");
5096                 nand_deselect_target(chip);
5097                 return ret;
5098         }
5099
5100         nand_maf_id = chip->id.data[0];
5101         nand_dev_id = chip->id.data[1];
5102
5103         nand_deselect_target(chip);
5104
5105         /* Check for a chip array */
5106         for (i = 1; i < maxchips; i++) {
5107                 u8 id[2];
5108
5109                 /* See comment in nand_detect() for reset */
5110                 ret = nand_reset(chip, i);
5111                 if (ret)
5112                         break;
5113
5114                 nand_select_target(chip, i);
5115                 /* Send the command for reading device ID */
5116                 ret = nand_readid_op(chip, 0, id, sizeof(id));
5117                 if (ret)
5118                         break;
5119                 /* Read manufacturer and device IDs */
5120                 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5121                         nand_deselect_target(chip);
5122                         break;
5123                 }
5124                 nand_deselect_target(chip);
5125         }
5126         if (i > 1)
5127                 pr_info("%d chips detected\n", i);
5128
5129         /* Store the number of chips and calc total size for mtd */
5130         memorg->ntargets = i;
5131         mtd->size = i * nanddev_target_size(&chip->base);
5132
5133         return 0;
5134 }
5135
5136 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5137 {
5138         kfree(chip->parameters.model);
5139         kfree(chip->parameters.onfi);
5140 }
5141
5142 int rawnand_sw_hamming_init(struct nand_chip *chip)
5143 {
5144         struct nand_ecc_sw_hamming_conf *engine_conf;
5145         struct nand_device *base = &chip->base;
5146         int ret;
5147
5148         base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5149         base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
5150         base->ecc.user_conf.strength = chip->ecc.strength;
5151         base->ecc.user_conf.step_size = chip->ecc.size;
5152
5153         ret = nand_ecc_sw_hamming_init_ctx(base);
5154         if (ret)
5155                 return ret;
5156
5157         engine_conf = base->ecc.ctx.priv;
5158
5159         if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
5160                 engine_conf->sm_order = true;
5161
5162         chip->ecc.size = base->ecc.ctx.conf.step_size;
5163         chip->ecc.strength = base->ecc.ctx.conf.strength;
5164         chip->ecc.total = base->ecc.ctx.total;
5165         chip->ecc.steps = engine_conf->nsteps;
5166         chip->ecc.bytes = engine_conf->code_size;
5167
5168         return 0;
5169 }
5170 EXPORT_SYMBOL(rawnand_sw_hamming_init);
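
/*
 * Note (illustrative, not taken from a specific driver): a controller driver
 * doing Hamming ECC in software would typically set chip->ecc.size and
 * chip->ecc.strength, call rawnand_sw_hamming_init() from its ->attach_chip()
 * hook, wire rawnand_sw_hamming_calculate()/_correct() into its ECC
 * callbacks, and call rawnand_sw_hamming_cleanup() when detaching the chip.
 */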
5171
5172 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
5173                                  const unsigned char *buf,
5174                                  unsigned char *code)
5175 {
5176         struct nand_device *base = &chip->base;
5177
5178         return nand_ecc_sw_hamming_calculate(base, buf, code);
5179 }
5180 EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
5181
5182 int rawnand_sw_hamming_correct(struct nand_chip *chip,
5183                                unsigned char *buf,
5184                                unsigned char *read_ecc,
5185                                unsigned char *calc_ecc)
5186 {
5187         struct nand_device *base = &chip->base;
5188
5189         return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
5190 }
5191 EXPORT_SYMBOL(rawnand_sw_hamming_correct);
5192
5193 void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
5194 {
5195         struct nand_device *base = &chip->base;
5196
5197         nand_ecc_sw_hamming_cleanup_ctx(base);
5198 }
5199 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
5200
5201 int rawnand_sw_bch_init(struct nand_chip *chip)
5202 {
5203         struct nand_device *base = &chip->base;
5204         struct nand_ecc_sw_bch_conf *engine_conf;
5205         int ret;
5206
5207         base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5208         base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
5209         base->ecc.user_conf.step_size = chip->ecc.size;
5210         base->ecc.user_conf.strength = chip->ecc.strength;
5211
5212         ret = nand_ecc_sw_bch_init_ctx(base);
5213         if (ret)
5214                 return ret;
5215
5216         engine_conf = base->ecc.ctx.priv;
5217
5218         chip->ecc.size = base->ecc.ctx.conf.step_size;
5219         chip->ecc.strength = base->ecc.ctx.conf.strength;
5220         chip->ecc.total = base->ecc.ctx.total;
5221         chip->ecc.steps = engine_conf->nsteps;
5222         chip->ecc.bytes = engine_conf->code_size;
5223
5224         return 0;
5225 }
5226 EXPORT_SYMBOL(rawnand_sw_bch_init);
5227
5228 static int rawnand_sw_bch_calculate(struct nand_chip *chip,
5229                                     const unsigned char *buf,
5230                                     unsigned char *code)
5231 {
5232         struct nand_device *base = &chip->base;
5233
5234         return nand_ecc_sw_bch_calculate(base, buf, code);
5235 }
5236
5237 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
5238                            unsigned char *read_ecc, unsigned char *calc_ecc)
5239 {
5240         struct nand_device *base = &chip->base;
5241
5242         return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
5243 }
5244 EXPORT_SYMBOL(rawnand_sw_bch_correct);
5245
5246 void rawnand_sw_bch_cleanup(struct nand_chip *chip)
5247 {
5248         struct nand_device *base = &chip->base;
5249
5250         nand_ecc_sw_bch_cleanup_ctx(base);
5251 }
5252 EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
5253
5254 static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
5255 {
5256         struct nand_ecc_ctrl *ecc = &chip->ecc;
5257
5258         switch (ecc->placement) {
5259         case NAND_ECC_PLACEMENT_UNKNOWN:
5260         case NAND_ECC_PLACEMENT_OOB:
5261                 /* Use standard hwecc read page function? */
5262                 if (!ecc->read_page)
5263                         ecc->read_page = nand_read_page_hwecc;
5264                 if (!ecc->write_page)
5265                         ecc->write_page = nand_write_page_hwecc;
5266                 if (!ecc->read_page_raw)
5267                         ecc->read_page_raw = nand_read_page_raw;
5268                 if (!ecc->write_page_raw)
5269                         ecc->write_page_raw = nand_write_page_raw;
5270                 if (!ecc->read_oob)
5271                         ecc->read_oob = nand_read_oob_std;
5272                 if (!ecc->write_oob)
5273                         ecc->write_oob = nand_write_oob_std;
5274                 if (!ecc->read_subpage)
5275                         ecc->read_subpage = nand_read_subpage;
5276                 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5277                         ecc->write_subpage = nand_write_subpage_hwecc;
5278                 fallthrough;
5279
5280         case NAND_ECC_PLACEMENT_INTERLEAVED:
5281                 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5282                     (!ecc->read_page ||
5283                      ecc->read_page == nand_read_page_hwecc ||
5284                      !ecc->write_page ||
5285                      ecc->write_page == nand_write_page_hwecc)) {
5286                         WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5287                         return -EINVAL;
5288                 }
5289                 /* Use standard syndrome read/write page function? */
5290                 if (!ecc->read_page)
5291                         ecc->read_page = nand_read_page_syndrome;
5292                 if (!ecc->write_page)
5293                         ecc->write_page = nand_write_page_syndrome;
5294                 if (!ecc->read_page_raw)
5295                         ecc->read_page_raw = nand_read_page_raw_syndrome;
5296                 if (!ecc->write_page_raw)
5297                         ecc->write_page_raw = nand_write_page_raw_syndrome;
5298                 if (!ecc->read_oob)
5299                         ecc->read_oob = nand_read_oob_syndrome;
5300                 if (!ecc->write_oob)
5301                         ecc->write_oob = nand_write_oob_syndrome;
5302                 break;
5303
5304         default:
5305                 pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
5306                         ecc->placement);
5307                 return -EINVAL;
5308         }
5309
5310         return 0;
5311 }
5312
5313 static int nand_set_ecc_soft_ops(struct nand_chip *chip)
5314 {
5315         struct mtd_info *mtd = nand_to_mtd(chip);
5316         struct nand_device *nanddev = mtd_to_nanddev(mtd);
5317         struct nand_ecc_ctrl *ecc = &chip->ecc;
5318         int ret;
5319
5320         if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
5321                 return -EINVAL;
5322
5323         switch (ecc->algo) {
5324         case NAND_ECC_ALGO_HAMMING:
5325                 ecc->calculate = rawnand_sw_hamming_calculate;
5326                 ecc->correct = rawnand_sw_hamming_correct;
5327                 ecc->read_page = nand_read_page_swecc;
5328                 ecc->read_subpage = nand_read_subpage;
5329                 ecc->write_page = nand_write_page_swecc;
5330                 if (!ecc->read_page_raw)
5331                         ecc->read_page_raw = nand_read_page_raw;
5332                 if (!ecc->write_page_raw)
5333                         ecc->write_page_raw = nand_write_page_raw;
5334                 ecc->read_oob = nand_read_oob_std;
5335                 ecc->write_oob = nand_write_oob_std;
5336                 if (!ecc->size)
5337                         ecc->size = 256;
5338                 ecc->bytes = 3;
5339                 ecc->strength = 1;
5340
5341                 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
5342                         ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
5343
5344                 ret = rawnand_sw_hamming_init(chip);
5345                 if (ret) {
5346                         WARN(1, "Hamming ECC initialization failed!\n");
5347                         return ret;
5348                 }
5349
5350                 return 0;
5351         case NAND_ECC_ALGO_BCH:
5352                 if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
5353                         WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
5354                         return -EINVAL;
5355                 }
5356                 ecc->calculate = rawnand_sw_bch_calculate;
5357                 ecc->correct = rawnand_sw_bch_correct;
5358                 ecc->read_page = nand_read_page_swecc;
5359                 ecc->read_subpage = nand_read_subpage;
5360                 ecc->write_page = nand_write_page_swecc;
5361                 if (!ecc->read_page_raw)
5362                         ecc->read_page_raw = nand_read_page_raw;
5363                 if (!ecc->write_page_raw)
5364                         ecc->write_page_raw = nand_write_page_raw;
5365                 ecc->read_oob = nand_read_oob_std;
5366                 ecc->write_oob = nand_write_oob_std;
5367
5368                 /*
5369                  * We can only maximize ECC config when the default layout is
5370                  * used, otherwise we don't know how many bytes can really be
5371                  * used.
5372                  */
5373                 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
5374                     mtd->ooblayout != nand_get_large_page_ooblayout())
5375                         nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;
5376
5377                 ret = rawnand_sw_bch_init(chip);
5378                 if (ret) {
5379                         WARN(1, "BCH ECC initialization failed!\n");
5380                         return ret;
5381                 }
5382
5383                 return 0;
5384         default:
5385                 WARN(1, "Unsupported ECC algorithm!\n");
5386                 return -EINVAL;
5387         }
5388 }
5389
5390 /**
5391  * nand_check_ecc_caps - check the sanity of preset ECC settings
5392  * @chip: nand chip info structure
5393  * @caps: ECC caps info structure
5394  * @oobavail: OOB size that the ECC engine can use
5395  *
5396  * When ECC step size and strength are already set, check if they are supported
5397  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5398  * On success, the calculated ECC bytes is set.
5399  */
5400 static int
5401 nand_check_ecc_caps(struct nand_chip *chip,
5402                     const struct nand_ecc_caps *caps, int oobavail)
5403 {
5404         struct mtd_info *mtd = nand_to_mtd(chip);
5405         const struct nand_ecc_step_info *stepinfo;
5406         int preset_step = chip->ecc.size;
5407         int preset_strength = chip->ecc.strength;
5408         int ecc_bytes, nsteps = mtd->writesize / preset_step;
5409         int i, j;
5410
5411         for (i = 0; i < caps->nstepinfos; i++) {
5412                 stepinfo = &caps->stepinfos[i];
5413
5414                 if (stepinfo->stepsize != preset_step)
5415                         continue;
5416
5417                 for (j = 0; j < stepinfo->nstrengths; j++) {
5418                         if (stepinfo->strengths[j] != preset_strength)
5419                                 continue;
5420
5421                         ecc_bytes = caps->calc_ecc_bytes(preset_step,
5422                                                          preset_strength);
5423                         if (WARN_ON_ONCE(ecc_bytes < 0))
5424                                 return ecc_bytes;
5425
5426                         if (ecc_bytes * nsteps > oobavail) {
5427                                 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5428                                        preset_step, preset_strength);
5429                                 return -ENOSPC;
5430                         }
5431
5432                         chip->ecc.bytes = ecc_bytes;
5433
5434                         return 0;
5435                 }
5436         }
5437
5438         pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5439                preset_step, preset_strength);
5440
5441         return -ENOTSUPP;
5442 }
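
/*
 * Worked example (illustrative, assuming a controller whose calc_ecc_bytes()
 * returns 13 for a 512-byte/8-bit configuration): with a preset
 * chip->ecc.size of 512 and strength of 8 on a 2048-byte page, nsteps = 4 and
 * 4 * 13 = 52 ECC bytes must fit in @oobavail for the check to pass.
 */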
5443
5444 /**
5445  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5446  * @chip: nand chip info structure
5447  * @caps: ECC engine caps info structure
5448  * @oobavail: OOB size that the ECC engine can use
5449  *
5450  * If a chip's ECC requirement is provided, try to meet it with the least
5451  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5452  * On success, the chosen ECC settings are set.
5453  */
5454 static int
5455 nand_match_ecc_req(struct nand_chip *chip,
5456                    const struct nand_ecc_caps *caps, int oobavail)
5457 {
5458         const struct nand_ecc_props *requirements =
5459                 nanddev_get_ecc_requirements(&chip->base);
5460         struct mtd_info *mtd = nand_to_mtd(chip);
5461         const struct nand_ecc_step_info *stepinfo;
5462         int req_step = requirements->step_size;
5463         int req_strength = requirements->strength;
5464         int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5465         int best_step, best_strength, best_ecc_bytes;
5466         int best_ecc_bytes_total = INT_MAX;
5467         int i, j;
5468
5469         /* No information provided by the NAND chip */
5470         if (!req_step || !req_strength)
5471                 return -ENOTSUPP;
5472
5473         /* number of correctable bits the chip requires in a page */
5474         req_corr = mtd->writesize / req_step * req_strength;
5475
5476         for (i = 0; i < caps->nstepinfos; i++) {
5477                 stepinfo = &caps->stepinfos[i];
5478                 step_size = stepinfo->stepsize;
5479
5480                 for (j = 0; j < stepinfo->nstrengths; j++) {
5481                         strength = stepinfo->strengths[j];
5482
5483                         /*
5484                          * If both step size and strength are smaller than the
5485                          * chip's requirement, it is not easy to compare the
5486                          * resulting reliability.
5487                          */
5488                         if (step_size < req_step && strength < req_strength)
5489                                 continue;
5490
5491                         if (mtd->writesize % step_size)
5492                                 continue;
5493
5494                         nsteps = mtd->writesize / step_size;
5495
5496                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5497                         if (WARN_ON_ONCE(ecc_bytes < 0))
5498                                 continue;
5499                         ecc_bytes_total = ecc_bytes * nsteps;
5500
5501                         if (ecc_bytes_total > oobavail ||
5502                             strength * nsteps < req_corr)
5503                                 continue;
5504
5505                         /*
5506                          * We assume the best is to meet the chip's requirement
5507                          * with the least number of ECC bytes.
5508                          */
5509                         if (ecc_bytes_total < best_ecc_bytes_total) {
5510                                 best_ecc_bytes_total = ecc_bytes_total;
5511                                 best_step = step_size;
5512                                 best_strength = strength;
5513                                 best_ecc_bytes = ecc_bytes;
5514                         }
5515                 }
5516         }
5517
5518         if (best_ecc_bytes_total == INT_MAX)
5519                 return -ENOTSUPP;
5520
5521         chip->ecc.size = best_step;
5522         chip->ecc.strength = best_strength;
5523         chip->ecc.bytes = best_ecc_bytes;
5524
5525         return 0;
5526 }
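
/*
 * Worked example (illustrative): a chip requiring 4 bits per 512 bytes on a
 * 2048-byte page needs req_corr = 4 * 4 = 16 correctable bits per page.
 * Among controller settings satisfying this, say (512, 4) costing 7 ECC bytes
 * per step and (512, 8) costing 13, the code above picks (512, 4) because
 * 4 * 7 = 28 total ECC bytes is the smaller footprint.
 */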
5527
5528 /**
5529  * nand_maximize_ecc - choose the max ECC strength available
5530  * @chip: nand chip info structure
5531  * @caps: ECC engine caps info structure
5532  * @oobavail: OOB size that the ECC engine can use
5533  *
5534  * Choose the max ECC strength that is supported on the controller, and can fit
5535  * within the chip's OOB.  On success, the chosen ECC settings are set.
5536  */
5537 static int
5538 nand_maximize_ecc(struct nand_chip *chip,
5539                   const struct nand_ecc_caps *caps, int oobavail)
5540 {
5541         struct mtd_info *mtd = nand_to_mtd(chip);
5542         const struct nand_ecc_step_info *stepinfo;
5543         int step_size, strength, nsteps, ecc_bytes, corr;
5544         int best_corr = 0;
5545         int best_step = 0;
5546         int best_strength, best_ecc_bytes;
5547         int i, j;
5548
5549         for (i = 0; i < caps->nstepinfos; i++) {
5550                 stepinfo = &caps->stepinfos[i];
5551                 step_size = stepinfo->stepsize;
5552
5553                 /* If chip->ecc.size is already set, respect it */
5554                 if (chip->ecc.size && step_size != chip->ecc.size)
5555                         continue;
5556
5557                 for (j = 0; j < stepinfo->nstrengths; j++) {
5558                         strength = stepinfo->strengths[j];
5559
5560                         if (mtd->writesize % step_size)
5561                                 continue;
5562
5563                         nsteps = mtd->writesize / step_size;
5564
5565                         ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5566                         if (WARN_ON_ONCE(ecc_bytes < 0))
5567                                 continue;
5568
5569                         if (ecc_bytes * nsteps > oobavail)
5570                                 continue;
5571
5572                         corr = strength * nsteps;
5573
5574                         /*
5575                          * If the number of correctable bits is the same,
5576                          * bigger step_size has more reliability.
5577                          */
5578                         if (corr > best_corr ||
5579                             (corr == best_corr && step_size > best_step)) {
5580                                 best_corr = corr;
5581                                 best_step = step_size;
5582                                 best_strength = strength;
5583                                 best_ecc_bytes = ecc_bytes;
5584                         }
5585                 }
5586         }
5587
5588         if (!best_corr)
5589                 return -ENOTSUPP;
5590
5591         chip->ecc.size = best_step;
5592         chip->ecc.strength = best_strength;
5593         chip->ecc.bytes = best_ecc_bytes;
5594
5595         return 0;
5596 }
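
/*
 * Worked example (illustrative): on a 2048-byte page with 64 OOB bytes usable
 * for ECC, a controller offering (512, 8) at 13 bytes per step and (512, 16)
 * at 26 bytes per step can only fit the former (4 * 13 = 52 vs. 4 * 26 = 104
 * bytes), so strength 8 is chosen; with 128 usable bytes both fit and the
 * stronger (512, 16) setting wins.
 */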
5597
5598 /**
5599  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5600  * @chip: nand chip info structure
5601  * @caps: ECC engine caps info structure
5602  * @oobavail: OOB size that the ECC engine can use
5603  *
5604  * Choose the ECC configuration according to following logic.
5605  *
5606  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5607  *    then check if it is supported by this controller.
5608  * 2. If the user provided the nand-ecc-maximize property, then select maximum
5609  *    ECC strength.
5610  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5611  *    to the chip's requirement. If available OOB size can't fit the chip
5612  *    requirement then fallback to the maximum ECC step size and ECC strength.
5613  *
5614  * On success, the chosen ECC settings are set.
5615  */
5616 int nand_ecc_choose_conf(struct nand_chip *chip,
5617                          const struct nand_ecc_caps *caps, int oobavail)
5618 {
5619         struct mtd_info *mtd = nand_to_mtd(chip);
5620         struct nand_device *nanddev = mtd_to_nanddev(mtd);
5621
5622         if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5623                 return -EINVAL;
5624
5625         if (chip->ecc.size && chip->ecc.strength)
5626                 return nand_check_ecc_caps(chip, caps, oobavail);
5627
5628         if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5629                 return nand_maximize_ecc(chip, caps, oobavail);
5630
5631         if (!nand_match_ecc_req(chip, caps, oobavail))
5632                 return 0;
5633
5634         return nand_maximize_ecc(chip, caps, oobavail);
5635 }
5636 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5637
5638 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5639 {
5640         struct nand_chip *chip = container_of(nand, struct nand_chip,
5641                                               base);
5642         unsigned int eb = nanddev_pos_to_row(nand, pos);
5643         int ret;
5644
5645         eb >>= nand->rowconv.eraseblock_addr_shift;
5646
5647         nand_select_target(chip, pos->target);
5648         ret = nand_erase_op(chip, eb);
5649         nand_deselect_target(chip);
5650
5651         return ret;
5652 }
5653
5654 static int rawnand_markbad(struct nand_device *nand,
5655                            const struct nand_pos *pos)
5656 {
5657         struct nand_chip *chip = container_of(nand, struct nand_chip,
5658                                               base);
5659
5660         return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5661 }
5662
5663 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5664 {
5665         struct nand_chip *chip = container_of(nand, struct nand_chip,
5666                                               base);
5667         int ret;
5668
5669         nand_select_target(chip, pos->target);
5670         ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5671         nand_deselect_target(chip);
5672
5673         return ret;
5674 }
5675
5676 static const struct nand_ops rawnand_ops = {
5677         .erase = rawnand_erase,
5678         .markbad = rawnand_markbad,
5679         .isbad = rawnand_isbad,
5680 };
5681
5682 /**
5683  * nand_scan_tail - Scan for the NAND device
5684  * @chip: NAND chip object
5685  *
5686  * This is the second phase of the normal nand_scan() function. It fills out
5687  * all the uninitialized function pointers with the defaults and scans for a
5688  * bad block table if appropriate.
5689  */
5690 static int nand_scan_tail(struct nand_chip *chip)
5691 {
5692         struct mtd_info *mtd = nand_to_mtd(chip);
5693         struct nand_ecc_ctrl *ecc = &chip->ecc;
5694         int ret, i;
5695
5696         /* New bad blocks should be marked in OOB, flash-based BBT, or both */
5697         if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5698                    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5699                 return -EINVAL;
5700         }
5701
5702         chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5703         if (!chip->data_buf)
5704                 return -ENOMEM;
5705
5706         /*
5707          * FIXME: some NAND manufacturer drivers expect the first die to be
5708          * selected when manufacturer->init() is called. They should be fixed
5709          * to explicitly select the relevant die when interacting with the NAND
5710          * chip.
5711          */
5712         nand_select_target(chip, 0);
5713         ret = nand_manufacturer_init(chip);
5714         nand_deselect_target(chip);
5715         if (ret)
5716                 goto err_free_buf;
5717
5718         /* Set the internal oob buffer location, just after the page data */
5719         chip->oob_poi = chip->data_buf + mtd->writesize;
5720
5721         /*
5722          * If no default placement scheme is given, select an appropriate one.
5723          */
5724         if (!mtd->ooblayout &&
5725             !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
5726               ecc->algo == NAND_ECC_ALGO_BCH) &&
5727             !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
5728               ecc->algo == NAND_ECC_ALGO_HAMMING)) {
5729                 switch (mtd->oobsize) {
5730                 case 8:
5731                 case 16:
5732                         mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
5733                         break;
5734                 case 64:
5735                 case 128:
5736                         mtd_set_ooblayout(mtd,
5737                                           nand_get_large_page_hamming_ooblayout());
5738                         break;
5739                 default:
5740                         /*
5741                          * Expose the whole OOB area to users if ECC_NONE
5742                          * is passed. We could do that for all kinds of
5743                          * ->oobsize, but we must keep the old large/small
5744                          * page with ECC layout when ->oobsize <= 128 for
5745                          * compatibility reasons.
5746                          */
5747                         if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
5748                                 mtd_set_ooblayout(mtd,
5749                                                   nand_get_large_page_ooblayout());
5750                                 break;
5751                         }
5752
5753                         WARN(1, "No oob scheme defined for oobsize %d\n",
5754                                 mtd->oobsize);
5755                         ret = -EINVAL;
5756                         goto err_nand_manuf_cleanup;
5757                 }
5758         }
5759
5760         /*
5761          * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected but
5762          * the page size is only 256 bytes, fall back to software ECC.
5763          */
5764
5765         switch (ecc->engine_type) {
5766         case NAND_ECC_ENGINE_TYPE_ON_HOST:
5767                 ret = nand_set_ecc_on_host_ops(chip);
5768                 if (ret)
5769                         goto err_nand_manuf_cleanup;
5770
5771                 if (mtd->writesize >= ecc->size) {
5772                         if (!ecc->strength) {
5773                                 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5774                                 ret = -EINVAL;
5775                                 goto err_nand_manuf_cleanup;
5776                         }
5777                         break;
5778                 }
5779                 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5780                         ecc->size, mtd->writesize);
5781                 ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5782                 ecc->algo = NAND_ECC_ALGO_HAMMING;
5783                 fallthrough;
5784
5785         case NAND_ECC_ENGINE_TYPE_SOFT:
5786                 ret = nand_set_ecc_soft_ops(chip);
5787                 if (ret)
5788                         goto err_nand_manuf_cleanup;
5789                 break;
5790
5791         case NAND_ECC_ENGINE_TYPE_ON_DIE:
5792                 if (!ecc->read_page || !ecc->write_page) {
5793                         WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5794                         ret = -EINVAL;
5795                         goto err_nand_manuf_cleanup;
5796                 }
5797                 if (!ecc->read_oob)
5798                         ecc->read_oob = nand_read_oob_std;
5799                 if (!ecc->write_oob)
5800                         ecc->write_oob = nand_write_oob_std;
5801                 break;
5802
5803         case NAND_ECC_ENGINE_TYPE_NONE:
5804                 pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
5805                 ecc->read_page = nand_read_page_raw;
5806                 ecc->write_page = nand_write_page_raw;
5807                 ecc->read_oob = nand_read_oob_std;
5808                 ecc->read_page_raw = nand_read_page_raw;
5809                 ecc->write_page_raw = nand_write_page_raw;
5810                 ecc->write_oob = nand_write_oob_std;
5811                 ecc->size = mtd->writesize;
5812                 ecc->bytes = 0;
5813                 ecc->strength = 0;
5814                 break;
5815
5816         default:
5817                 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
5818                 ret = -EINVAL;
5819                 goto err_nand_manuf_cleanup;
5820         }
5821
5822         if (ecc->correct || ecc->calculate) {
5823                 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5824                 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5825                 if (!ecc->calc_buf || !ecc->code_buf) {
5826                         ret = -ENOMEM;
5827                         goto err_nand_manuf_cleanup;
5828                 }
5829         }
5830
5831         /* For many systems, the standard OOB write also works for raw */
5832         if (!ecc->read_oob_raw)
5833                 ecc->read_oob_raw = ecc->read_oob;
5834         if (!ecc->write_oob_raw)
5835                 ecc->write_oob_raw = ecc->write_oob;
5836
5837         /* propagate ecc info to mtd_info */
5838         mtd->ecc_strength = ecc->strength;
5839         mtd->ecc_step_size = ecc->size;
5840
5841         /*
5842          * Set the number of read / write steps for one page depending on ECC
5843          * mode.
5844          */
5845         if (!ecc->steps)
5846                 ecc->steps = mtd->writesize / ecc->size;
5847         if (ecc->steps * ecc->size != mtd->writesize) {
5848                 WARN(1, "Invalid ECC parameters\n");
5849                 ret = -EINVAL;
5850                 goto err_nand_manuf_cleanup;
5851         }
5852
5853         if (!ecc->total) {
5854                 ecc->total = ecc->steps * ecc->bytes;
5855                 chip->base.ecc.ctx.total = ecc->total;
5856         }
5857
5858         if (ecc->total > mtd->oobsize) {
5859                 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5860                 ret = -EINVAL;
5861                 goto err_nand_manuf_cleanup;
5862         }
5863
5864         /*
5865          * The number of bytes available for a client to place data into
5866          * the out-of-band area.
5867          */
5868         ret = mtd_ooblayout_count_freebytes(mtd);
5869         if (ret < 0)
5870                 ret = 0;
5871
5872         mtd->oobavail = ret;
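        /*
         * Purely illustrative figures: on a part with a 64-byte OOB area
         * where the ECC bytes and bad-block markers consume 40 bytes, the
         * layout would leave mtd->oobavail = 24 bytes for clients such as
         * file systems.
         */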
5873
5874         /* ECC sanity check: warn if it's too weak */
5875         if (!nand_ecc_is_strong_enough(&chip->base))
5876                 pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
5877                         mtd->name, chip->ecc.strength, chip->ecc.size,
5878                         nanddev_get_ecc_requirements(&chip->base)->strength,
5879                         nanddev_get_ecc_requirements(&chip->base)->step_size);
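        /*
         * Example of what triggers the warning above (hypothetical numbers):
         * a chip whose datasheet requires 8 bits of correction per 512 bytes
         * paired with an ECC engine configured for only 4 bits per 512
         * bytes. The device still works, but with less protection than the
         * manufacturer recommends.
         */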
5880
5881         /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5882         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5883                 switch (ecc->steps) {
5884                 case 2:
5885                         mtd->subpage_sft = 1;
5886                         break;
5887                 case 4:
5888                 case 8:
5889                 case 16:
5890                         mtd->subpage_sft = 2;
5891                         break;
5892                 }
5893         }
5894         chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
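        /*
         * Illustration: on a hypothetical SLC chip with a 2048-byte page and
         * 4 ECC steps, subpage_sft is 2, so chip->subpagesize becomes
         * 2048 >> 2 = 512 bytes.
         */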
5895
5896         /* Invalidate the pagebuffer reference */
5897         chip->pagecache.page = -1;
5898
5899         /* Large page NAND with SOFT_ECC should support subpage reads */
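        /*
         * (A page_shift greater than 9 corresponds to a page size larger
         * than 512 bytes, i.e. any large page device.)
         */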
5900         switch (ecc->engine_type) {
5901         case NAND_ECC_ENGINE_TYPE_SOFT:
5902                 if (chip->page_shift > 9)
5903                         chip->options |= NAND_SUBPAGE_READ;
5904                 break;
5905
5906         default:
5907                 break;
5908         }
5909
5910         ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
5911         if (ret)
5912                 goto err_nand_manuf_cleanup;
5913
5914         /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
5915         if (chip->options & NAND_ROM)
5916                 mtd->flags = MTD_CAP_ROM;
5917
5918         /* Fill in remaining MTD driver data */
5919         mtd->_erase = nand_erase;
5920         mtd->_point = NULL;
5921         mtd->_unpoint = NULL;
5922         mtd->_panic_write = panic_nand_write;
5923         mtd->_read_oob = nand_read_oob;
5924         mtd->_write_oob = nand_write_oob;
5925         mtd->_sync = nand_sync;
5926         mtd->_lock = nand_lock;
5927         mtd->_unlock = nand_unlock;
5928         mtd->_suspend = nand_suspend;
5929         mtd->_resume = nand_resume;
5930         mtd->_reboot = nand_shutdown;
5931         mtd->_block_isreserved = nand_block_isreserved;
5932         mtd->_block_isbad = nand_block_isbad;
5933         mtd->_block_markbad = nand_block_markbad;
5934         mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
5935
5936         /*
5937          * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
5938          * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5939          * properly set.
5940          */
5941         if (!mtd->bitflip_threshold)
5942                 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
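        /*
         * For example, with an ecc_strength of 8 (illustrative value) the
         * default is DIV_ROUND_UP(8 * 3, 4) = 6, i.e. the MTD layer starts
         * returning -EUCLEAN once 6 bitflips are observed in a single ECC
         * step.
         */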
5943
5944         /* Find the fastest data interface for this chip */
5945         ret = nand_choose_interface_config(chip);
5946         if (ret)
5947                 goto err_nanddev_cleanup;
5948
5949         /* Enter fastest possible mode on all dies. */
5950         for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
5951                 ret = nand_setup_interface(chip, i);
5952                 if (ret)
5953                         goto err_free_interface_config;
5954         }
5955
5956         /* Check if we should skip the bad block table scan */
5957         if (chip->options & NAND_SKIP_BBTSCAN)
5958                 return 0;
5959
5960         /* Build bad block table */
5961         ret = nand_create_bbt(chip);
5962         if (ret)
5963                 goto err_free_interface_config;
5964
5965         return 0;
5966
5967 err_free_interface_config:
5968         kfree(chip->best_interface_config);
5969
5970 err_nanddev_cleanup:
5971         nanddev_cleanup(&chip->base);
5972
5973 err_nand_manuf_cleanup:
5974         nand_manufacturer_cleanup(chip);
5975
5976 err_free_buf:
5977         kfree(chip->data_buf);
5978         kfree(ecc->code_buf);
5979         kfree(ecc->calc_buf);
5980
5981         return ret;
5982 }
5983
5984 static int nand_attach(struct nand_chip *chip)
5985 {
5986         if (chip->controller->ops && chip->controller->ops->attach_chip)
5987                 return chip->controller->ops->attach_chip(chip);
5988
5989         return 0;
5990 }
5991
5992 static void nand_detach(struct nand_chip *chip)
5993 {
5994         if (chip->controller->ops && chip->controller->ops->detach_chip)
5995                 chip->controller->ops->detach_chip(chip);
5996 }
5997
5998 /**
5999  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
6000  * @chip: NAND chip object
6001  * @maxchips: number of chips to scan for.
6002  * @ids: optional flash IDs table
6003  *
6004  * This fills out all the uninitialized function pointers with the defaults.
6005  * The flash ID is read and the mtd/chip structures are filled with the
6006  * appropriate values.
6007  */
6008 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
6009                        struct nand_flash_dev *ids)
6010 {
6011         int ret;
6012
6013         if (!maxchips)
6014                 return -EINVAL;
6015
6016         ret = nand_scan_ident(chip, maxchips, ids);
6017         if (ret)
6018                 return ret;
6019
6020         ret = nand_attach(chip);
6021         if (ret)
6022                 goto cleanup_ident;
6023
6024         ret = nand_scan_tail(chip);
6025         if (ret)
6026                 goto detach_chip;
6027
6028         return 0;
6029
6030 detach_chip:
6031         nand_detach(chip);
6032 cleanup_ident:
6033         nand_scan_ident_cleanup(chip);
6034
6035         return ret;
6036 }
6037 EXPORT_SYMBOL(nand_scan_with_ids);
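/*
 * Sketch of typical usage from a controller driver's probe path. This is
 * illustrative only: my_probe() is a made-up name and error handling is
 * trimmed. Most drivers go through the nand_scan() wrapper, which simply
 * calls nand_scan_with_ids() with a NULL ids table.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct nand_chip *chip;	 (embedded in a driver-private struct)
 *		int ret;
 *
 *		... controller setup, chip->controller assignment ...
 *
 *		ret = nand_scan(chip, 1);
 *		if (ret)
 *			return ret;
 *
 *		ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *		if (ret)
 *			nand_cleanup(chip);
 *
 *		return ret;
 *	}
 */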
6038
6039 /**
6040  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
6041  * @chip: NAND chip object
6042  */
6043 void nand_cleanup(struct nand_chip *chip)
6044 {
6045         if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
6046                 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
6047                         rawnand_sw_hamming_cleanup(chip);
6048                 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
6049                         rawnand_sw_bch_cleanup(chip);
6050         }
6051
6052         nanddev_cleanup(&chip->base);
6053
6054         /* Free bad block table memory */
6055         kfree(chip->bbt);
6056         kfree(chip->data_buf);
6057         kfree(chip->ecc.code_buf);
6058         kfree(chip->ecc.calc_buf);
6059
6060         /* Free bad block descriptor memory */
6061         if (chip->badblock_pattern && chip->badblock_pattern->options
6062                         & NAND_BBT_DYNAMICSTRUCT)
6063                 kfree(chip->badblock_pattern);
6064
6065         /* Free the data interface */
6066         kfree(chip->best_interface_config);
6067
6068         /* Free manufacturer priv data. */
6069         nand_manufacturer_cleanup(chip);
6070
6071         /* Free controller-specific allocations made after chip identification */
6072         nand_detach(chip);
6073
6074         /* Free identification phase allocations */
6075         nand_scan_ident_cleanup(chip);
6076 }
6078 EXPORT_SYMBOL_GPL(nand_cleanup);
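/*
 * Removal-path note (a sketch, not lifted from any specific driver): the MTD
 * device must be unregistered before the chip resources are freed, e.g. in a
 * remove() callback:
 *
 *	ret = mtd_device_unregister(nand_to_mtd(chip));
 *	WARN_ON(ret);
 *	nand_cleanup(chip);
 */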
6079
6080 MODULE_LICENSE("GPL");
6081 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6082 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6083 MODULE_DESCRIPTION("Generic NAND flash driver code");