1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *      Peter Pan <peterpandong@micron.com>
7  *      Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9
10 #define pr_fmt(fmt)     "spi-nand: " fmt
11
12 #include <linux/device.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/spinand.h>
17 #include <linux/of.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
22
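/*
 * Feature register helpers: GET/SET FEATURE operations go through
 * spinand->scratchbuf because spi-mem requires DMA-able data buffers.
 */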
23 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
24 {
25         struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
26                                                       spinand->scratchbuf);
27         int ret;
28
29         ret = spi_mem_exec_op(spinand->spimem, &op);
30         if (ret)
31                 return ret;
32
33         *val = *spinand->scratchbuf;
34         return 0;
35 }
36
37 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
38 {
39         struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
40                                                       spinand->scratchbuf);
41
42         *spinand->scratchbuf = val;
43         return spi_mem_exec_op(spinand->spimem, &op);
44 }
45
46 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
47 {
48         return spinand_read_reg_op(spinand, REG_STATUS, status);
49 }
50
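/*
 * The configuration register is cached per target so that a GET FEATURE
 * round-trip is not needed on every access; spinand_set_cfg() only issues a
 * write when the new value differs from the cached one.
 */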
51 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
52 {
53         struct nand_device *nand = spinand_to_nand(spinand);
54
55         if (WARN_ON(spinand->cur_target < 0 ||
56                     spinand->cur_target >= nand->memorg.ntargets))
57                 return -EINVAL;
58
59         *cfg = spinand->cfg_cache[spinand->cur_target];
60         return 0;
61 }
62
63 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
64 {
65         struct nand_device *nand = spinand_to_nand(spinand);
66         int ret;
67
68         if (WARN_ON(spinand->cur_target < 0 ||
69                     spinand->cur_target >= nand->memorg.ntargets))
70                 return -EINVAL;
71
72         if (spinand->cfg_cache[spinand->cur_target] == cfg)
73                 return 0;
74
75         ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
76         if (ret)
77                 return ret;
78
79         spinand->cfg_cache[spinand->cur_target] = cfg;
80         return 0;
81 }
82
83 /**
84  * spinand_upd_cfg() - Update the configuration register
85  * @spinand: the spinand device
86  * @mask: the mask encoding the bits to update in the config reg
87  * @val: the new value to apply
88  *
89  * Update the configuration register.
90  *
91  * Return: 0 on success, a negative error code otherwise.
92  */
93 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
94 {
95         int ret;
96         u8 cfg;
97
98         ret = spinand_get_cfg(spinand, &cfg);
99         if (ret)
100                 return ret;
101
102         cfg &= ~mask;
103         cfg |= val;
104
105         return spinand_set_cfg(spinand, cfg);
106 }
107
108 /**
109  * spinand_select_target() - Select a specific NAND target/die
110  * @spinand: the spinand device
111  * @target: the target/die to select
112  *
113  * Select a new target/die. If the chip only has one die, this function is a NOOP.
114  *
115  * Return: 0 on success, a negative error code otherwise.
116  */
117 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
118 {
119         struct nand_device *nand = spinand_to_nand(spinand);
120         int ret;
121
122         if (WARN_ON(target >= nand->memorg.ntargets))
123                 return -EINVAL;
124
125         if (spinand->cur_target == target)
126                 return 0;
127
128         if (nand->memorg.ntargets == 1) {
129                 spinand->cur_target = target;
130                 return 0;
131         }
132
133         ret = spinand->select_target(spinand, target);
134         if (ret)
135                 return ret;
136
137         spinand->cur_target = target;
138         return 0;
139 }
140
141 static int spinand_init_cfg_cache(struct spinand_device *spinand)
142 {
143         struct nand_device *nand = spinand_to_nand(spinand);
144         struct device *dev = &spinand->spimem->spi->dev;
145         unsigned int target;
146         int ret;
147
148         spinand->cfg_cache = devm_kcalloc(dev,
149                                           nand->memorg.ntargets,
150                                           sizeof(*spinand->cfg_cache),
151                                           GFP_KERNEL);
152         if (!spinand->cfg_cache)
153                 return -ENOMEM;
154
155         for (target = 0; target < nand->memorg.ntargets; target++) {
156                 ret = spinand_select_target(spinand, target);
157                 if (ret)
158                         return ret;
159
160                 /*
161                  * We use spinand_read_reg_op() instead of spinand_get_cfg()
162                  * here to bypass the config cache.
163                  */
164                 ret = spinand_read_reg_op(spinand, REG_CFG,
165                                           &spinand->cfg_cache[target]);
166                 if (ret)
167                         return ret;
168         }
169
170         return 0;
171 }
172
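/*
 * Set the QE bit only when the chip advertises it (SPINAND_HAS_QE_BIT) and
 * at least one of the selected cache op templates transfers data over four
 * I/O lines.
 */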
173 static int spinand_init_quad_enable(struct spinand_device *spinand)
174 {
175         bool enable = false;
176
177         if (!(spinand->flags & SPINAND_HAS_QE_BIT))
178                 return 0;
179
180         if (spinand->op_templates.read_cache->data.buswidth == 4 ||
181             spinand->op_templates.write_cache->data.buswidth == 4 ||
182             spinand->op_templates.update_cache->data.buswidth == 4)
183                 enable = true;
184
185         return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
186                                enable ? CFG_QUAD_ENABLE : 0);
187 }
188
189 static int spinand_ecc_enable(struct spinand_device *spinand,
190                               bool enable)
191 {
192         return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
193                                enable ? CFG_ECC_ENABLE : 0);
194 }
195
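/*
 * Translate the on-die ECC status bits into the MTD convention: the number
 * of corrected bitflips on success, -EBADMSG on an uncorrectable error.
 * Vendors with finer-grained reporting provide their own ->get_status().
 */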
196 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
197 {
198         struct nand_device *nand = spinand_to_nand(spinand);
199
200         if (spinand->eccinfo.get_status)
201                 return spinand->eccinfo.get_status(spinand, status);
202
203         switch (status & STATUS_ECC_MASK) {
204         case STATUS_ECC_NO_BITFLIPS:
205                 return 0;
206
207         case STATUS_ECC_HAS_BITFLIPS:
208                 /*
209                  * We have no way to know exactly how many bitflips have been
210                  * fixed, so let's return the maximum possible value so that
211                  * wear-leveling layers move the data immediately.
212                  */
213                 return nanddev_get_ecc_conf(nand)->strength;
214
215         case STATUS_ECC_UNCOR_ERROR:
216                 return -EBADMSG;
217
218         default:
219                 break;
220         }
221
222         return -EINVAL;
223 }
224
225 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
226                                        struct mtd_oob_region *region)
227 {
228         return -ERANGE;
229 }
230
231 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
232                                         struct mtd_oob_region *region)
233 {
234         if (section)
235                 return -ERANGE;
236
237         /* Reserve 2 bytes for the BBM. */
238         region->offset = 2;
239         region->length = 62;
240
241         return 0;
242 }
243
244 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
245         .ecc = spinand_noecc_ooblayout_ecc,
246         .free = spinand_noecc_ooblayout_free,
247 };
248
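/*
 * On-die ECC engine hooks: the context mirrors the chip's ECC requirements,
 * allocates room to stash the post-read status byte and picks either the
 * vendor OOB layout or a minimal one that only reserves the BBM bytes.
 */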
249 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
250 {
251         struct spinand_device *spinand = nand_to_spinand(nand);
252         struct mtd_info *mtd = nanddev_to_mtd(nand);
253         struct spinand_ondie_ecc_conf *engine_conf;
254
255         nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
256         nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
257         nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
258
259         engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
260         if (!engine_conf)
261                 return -ENOMEM;
262
263         nand->ecc.ctx.priv = engine_conf;
264
265         if (spinand->eccinfo.ooblayout)
266                 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
267         else
268                 mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
269
270         return 0;
271 }
272
273 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
274 {
275         kfree(nand->ecc.ctx.priv);
276 }
277
278 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
279                                             struct nand_page_io_req *req)
280 {
281         struct spinand_device *spinand = nand_to_spinand(nand);
282         bool enable = (req->mode != MTD_OPS_RAW);
283
284         /* Only enable or disable the engine */
285         return spinand_ecc_enable(spinand, enable);
286 }
287
288 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
289                                            struct nand_page_io_req *req)
290 {
291         struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
292         struct spinand_device *spinand = nand_to_spinand(nand);
293
294         if (req->mode == MTD_OPS_RAW)
295                 return 0;
296
297         /* Nothing to do when finishing a page write */
298         if (req->type == NAND_PAGE_WRITE)
299                 return 0;
300
301         /* Finish a page read: check the status, report errors/bitflips */
302         return spinand_check_ecc_status(spinand, engine_conf->status);
303 }
304
305 static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
306         .init_ctx = spinand_ondie_ecc_init_ctx,
307         .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
308         .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
309         .finish_io_req = spinand_ondie_ecc_finish_io_req,
310 };
311
312 static struct nand_ecc_engine spinand_ondie_ecc_engine = {
313         .ops = &spinand_ondie_ecc_engine_ops,
314 };
315
316 static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
317 {
318         struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
319
320         if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
321             engine_conf)
322                 engine_conf->status = status;
323 }
324
325 static int spinand_write_enable_op(struct spinand_device *spinand)
326 {
327         struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
328
329         return spi_mem_exec_op(spinand->spimem, &op);
330 }
331
332 static int spinand_load_page_op(struct spinand_device *spinand,
333                                 const struct nand_page_io_req *req)
334 {
335         struct nand_device *nand = spinand_to_nand(spinand);
336         unsigned int row = nanddev_pos_to_row(nand, &req->pos);
337         struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
338
339         return spi_mem_exec_op(spinand->spimem, &op);
340 }
341
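/*
 * Transfer the page cache content through the plane's read dirmap. The loop
 * copes with spi-mem controllers that split the transfer and return partial
 * reads.
 */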
342 static int spinand_read_from_cache_op(struct spinand_device *spinand,
343                                       const struct nand_page_io_req *req)
344 {
345         struct nand_device *nand = spinand_to_nand(spinand);
346         struct spi_mem_dirmap_desc *rdesc;
347         unsigned int nbytes = 0;
348         void *buf = NULL;
349         u16 column = 0;
350         ssize_t ret;
351
352         if (req->datalen) {
353                 buf = spinand->databuf;
354                 nbytes = nanddev_page_size(nand);
355                 column = 0;
356         }
357
358         if (req->ooblen) {
359                 nbytes += nanddev_per_page_oobsize(nand);
360                 if (!buf) {
361                         buf = spinand->oobbuf;
362                         column = nanddev_page_size(nand);
363                 }
364         }
365
366         rdesc = spinand->dirmaps[req->pos.plane].rdesc;
367
368         while (nbytes) {
369                 ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
370                 if (ret < 0)
371                         return ret;
372
373                 if (!ret || ret > nbytes)
374                         return -EIO;
375
376                 nbytes -= ret;
377                 column += ret;
378                 buf += ret;
379         }
380
381         if (req->datalen)
382                 memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
383                        req->datalen);
384
385         if (req->ooblen)
386                 memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
387                        req->ooblen);
388
389         return 0;
390 }
391
392 static int spinand_write_to_cache_op(struct spinand_device *spinand,
393                                      const struct nand_page_io_req *req)
394 {
395         struct nand_device *nand = spinand_to_nand(spinand);
396         struct mtd_info *mtd = spinand_to_mtd(spinand);
397         struct spi_mem_dirmap_desc *wdesc;
398         unsigned int nbytes, column = 0;
399         void *buf = spinand->databuf;
400         ssize_t ret;
401
402         /*
403          * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
404          * the cache content to 0xFF (depends on vendor implementation), so we
405          * must fill the page cache entirely even if we only want to program
406          * the data portion of the page; otherwise we might corrupt the BBM or
407          * user data previously programmed in the OOB area.
408          *
409          * Only reset the data buffer manually, the OOB buffer is prepared by
410          * ECC engines ->prepare_io_req() callback.
411          */
412         nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
413         memset(spinand->databuf, 0xff, nanddev_page_size(nand));
414
415         if (req->datalen)
416                 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
417                        req->datalen);
418
419         if (req->ooblen) {
420                 if (req->mode == MTD_OPS_AUTO_OOB)
421                         mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
422                                                     spinand->oobbuf,
423                                                     req->ooboffs,
424                                                     req->ooblen);
425                 else
426                         memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
427                                req->ooblen);
428         }
429
430         wdesc = spinand->dirmaps[req->pos.plane].wdesc;
431
432         while (nbytes) {
433                 ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
434                 if (ret < 0)
435                         return ret;
436
437                 if (!ret || ret > nbytes)
438                         return -EIO;
439
440                 nbytes -= ret;
441                 column += ret;
442                 buf += ret;
443         }
444
445         return 0;
446 }
447
448 static int spinand_program_op(struct spinand_device *spinand,
449                               const struct nand_page_io_req *req)
450 {
451         struct nand_device *nand = spinand_to_nand(spinand);
452         unsigned int row = nanddev_pos_to_row(nand, &req->pos);
453         struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
454
455         return spi_mem_exec_op(spinand->spimem, &op);
456 }
457
458 static int spinand_erase_op(struct spinand_device *spinand,
459                             const struct nand_pos *pos)
460 {
461         struct nand_device *nand = spinand_to_nand(spinand);
462         unsigned int row = nanddev_pos_to_row(nand, pos);
463         struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
464
465         return spi_mem_exec_op(spinand->spimem, &op);
466 }
467
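/*
 * Poll the status register until the STATUS_BUSY bit clears, giving up after
 * roughly 400ms. The last status value is returned through @s when requested.
 */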
468 static int spinand_wait(struct spinand_device *spinand, u8 *s)
469 {
470         unsigned long timeo = jiffies + msecs_to_jiffies(400);
471         u8 status;
472         int ret;
473
474         do {
475                 ret = spinand_read_status(spinand, &status);
476                 if (ret)
477                         return ret;
478
479                 if (!(status & STATUS_BUSY))
480                         goto out;
481         } while (time_before(jiffies, timeo));
482
483         /*
484          * Extra read, just in case the STATUS_BUSY bit has changed
485          * since our last check
486          */
487         ret = spinand_read_status(spinand, &status);
488         if (ret)
489                 return ret;
490
491 out:
492         if (s)
493                 *s = status;
494
495         return status & STATUS_BUSY ? -ETIMEDOUT : 0;
496 }
497
498 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
499                               u8 ndummy, u8 *buf)
500 {
501         struct spi_mem_op op = SPINAND_READID_OP(
502                 naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
503         int ret;
504
505         ret = spi_mem_exec_op(spinand->spimem, &op);
506         if (!ret)
507                 memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
508
509         return ret;
510 }
511
512 static int spinand_reset_op(struct spinand_device *spinand)
513 {
514         struct spi_mem_op op = SPINAND_RESET_OP;
515         int ret;
516
517         ret = spi_mem_exec_op(spinand->spimem, &op);
518         if (ret)
519                 return ret;
520
521         return spinand_wait(spinand, NULL);
522 }
523
524 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
525 {
526         return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
527 }
528
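/*
 * Read one page: let the ECC engine prepare the request, load the page into
 * the on-chip cache, wait for the chip, record the status for on-die ECC
 * reporting, then transfer the cache content and let the ECC engine finish
 * the request.
 */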
529 static int spinand_read_page(struct spinand_device *spinand,
530                              const struct nand_page_io_req *req)
531 {
532         struct nand_device *nand = spinand_to_nand(spinand);
533         u8 status;
534         int ret;
535
536         ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
537         if (ret)
538                 return ret;
539
540         ret = spinand_load_page_op(spinand, req);
541         if (ret)
542                 return ret;
543
544         ret = spinand_wait(spinand, &status);
545         if (ret < 0)
546                 return ret;
547
548         spinand_ondie_ecc_save_status(nand, status);
549
550         ret = spinand_read_from_cache_op(spinand, req);
551         if (ret)
552                 return ret;
553
554         return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
555 }
556
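/*
 * Write one page: prepare the ECC context, issue WRITE ENABLE, fill the
 * on-chip cache, trigger PROGRAM EXECUTE and report -EIO if the chip flags a
 * program failure in its status register.
 */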
557 static int spinand_write_page(struct spinand_device *spinand,
558                               const struct nand_page_io_req *req)
559 {
560         struct nand_device *nand = spinand_to_nand(spinand);
561         u8 status;
562         int ret;
563
564         ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
565         if (ret)
566                 return ret;
567
568         ret = spinand_write_enable_op(spinand);
569         if (ret)
570                 return ret;
571
572         ret = spinand_write_to_cache_op(spinand, req);
573         if (ret)
574                 return ret;
575
576         ret = spinand_program_op(spinand, req);
577         if (ret)
578                 return ret;
579
580         ret = spinand_wait(spinand, &status);
581         if (!ret && (status & STATUS_PROG_FAILED))
582                 return -EIO;
583
584         return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
585 }
586
587 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
588                             struct mtd_oob_ops *ops)
589 {
590         struct spinand_device *spinand = mtd_to_spinand(mtd);
591         struct nand_device *nand = mtd_to_nanddev(mtd);
592         unsigned int max_bitflips = 0;
593         struct nand_io_iter iter;
594         bool disable_ecc = false;
595         bool ecc_failed = false;
596         int ret = 0;
597
598         if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
599                 disable_ecc = true;
600
601         mutex_lock(&spinand->lock);
602
603         nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
604                 if (disable_ecc)
605                         iter.req.mode = MTD_OPS_RAW;
606
607                 ret = spinand_select_target(spinand, iter.req.pos.target);
608                 if (ret)
609                         break;
610
611                 ret = spinand_read_page(spinand, &iter.req);
612                 if (ret < 0 && ret != -EBADMSG)
613                         break;
614
615                 if (ret == -EBADMSG) {
616                         ecc_failed = true;
617                         mtd->ecc_stats.failed++;
618                 } else {
619                         mtd->ecc_stats.corrected += ret;
620                         max_bitflips = max_t(unsigned int, max_bitflips, ret);
621                 }
622
623                 ret = 0;
624                 ops->retlen += iter.req.datalen;
625                 ops->oobretlen += iter.req.ooblen;
626         }
627
628         mutex_unlock(&spinand->lock);
629
630         if (ecc_failed && !ret)
631                 ret = -EBADMSG;
632
633         return ret ? ret : max_bitflips;
634 }
635
636 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
637                              struct mtd_oob_ops *ops)
638 {
639         struct spinand_device *spinand = mtd_to_spinand(mtd);
640         struct nand_device *nand = mtd_to_nanddev(mtd);
641         struct nand_io_iter iter;
642         bool disable_ecc = false;
643         int ret = 0;
644
645         if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
646                 disable_ecc = true;
647
648         mutex_lock(&spinand->lock);
649
650         nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
651                 if (disable_ecc)
652                         iter.req.mode = MTD_OPS_RAW;
653
654                 ret = spinand_select_target(spinand, iter.req.pos.target);
655                 if (ret)
656                         break;
657
658                 ret = spinand_write_page(spinand, &iter.req);
659                 if (ret)
660                         break;
661
662                 ops->retlen += iter.req.datalen;
663                 ops->oobretlen += iter.req.ooblen;
664         }
665
666         mutex_unlock(&spinand->lock);
667
668         return ret;
669 }
670
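/*
 * Bad block check: read the two bad block marker (BBM) bytes at the start of
 * the OOB area in raw mode and treat anything other than 0xff 0xff as bad.
 */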
671 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
672 {
673         struct spinand_device *spinand = nand_to_spinand(nand);
674         u8 marker[2] = { };
675         struct nand_page_io_req req = {
676                 .pos = *pos,
677                 .ooblen = sizeof(marker),
678                 .ooboffs = 0,
679                 .oobbuf.in = marker,
680                 .mode = MTD_OPS_RAW,
681         };
682
683         spinand_select_target(spinand, pos->target);
684         spinand_read_page(spinand, &req);
685         if (marker[0] != 0xff || marker[1] != 0xff)
686                 return true;
687
688         return false;
689 }
690
691 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
692 {
693         struct nand_device *nand = mtd_to_nanddev(mtd);
694         struct spinand_device *spinand = nand_to_spinand(nand);
695         struct nand_pos pos;
696         int ret;
697
698         nanddev_offs_to_pos(nand, offs, &pos);
699         mutex_lock(&spinand->lock);
700         ret = nanddev_isbad(nand, &pos);
701         mutex_unlock(&spinand->lock);
702
703         return ret;
704 }
705
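/*
 * Mark a block bad by programming 0x00 0x00 over the BBM bytes, again in raw
 * mode so the write is not transformed by the on-die ECC engine.
 */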
706 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
707 {
708         struct spinand_device *spinand = nand_to_spinand(nand);
709         u8 marker[2] = { };
710         struct nand_page_io_req req = {
711                 .pos = *pos,
712                 .ooboffs = 0,
713                 .ooblen = sizeof(marker),
714                 .oobbuf.out = marker,
715                 .mode = MTD_OPS_RAW,
716         };
717         int ret;
718
719         ret = spinand_select_target(spinand, pos->target);
720         if (ret)
721                 return ret;
722
723         ret = spinand_write_enable_op(spinand);
724         if (ret)
725                 return ret;
726
727         return spinand_write_page(spinand, &req);
728 }
729
730 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
731 {
732         struct nand_device *nand = mtd_to_nanddev(mtd);
733         struct spinand_device *spinand = nand_to_spinand(nand);
734         struct nand_pos pos;
735         int ret;
736
737         nanddev_offs_to_pos(nand, offs, &pos);
738         mutex_lock(&spinand->lock);
739         ret = nanddev_markbad(nand, &pos);
740         mutex_unlock(&spinand->lock);
741
742         return ret;
743 }
744
745 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
746 {
747         struct spinand_device *spinand = nand_to_spinand(nand);
748         u8 status;
749         int ret;
750
751         ret = spinand_select_target(spinand, pos->target);
752         if (ret)
753                 return ret;
754
755         ret = spinand_write_enable_op(spinand);
756         if (ret)
757                 return ret;
758
759         ret = spinand_erase_op(spinand, pos);
760         if (ret)
761                 return ret;
762
763         ret = spinand_wait(spinand, &status);
764         if (!ret && (status & STATUS_ERASE_FAILED))
765                 ret = -EIO;
766
767         return ret;
768 }
769
770 static int spinand_mtd_erase(struct mtd_info *mtd,
771                              struct erase_info *einfo)
772 {
773         struct spinand_device *spinand = mtd_to_spinand(mtd);
774         int ret;
775
776         mutex_lock(&spinand->lock);
777         ret = nanddev_mtd_erase(mtd, einfo);
778         mutex_unlock(&spinand->lock);
779
780         return ret;
781 }
782
783 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
784 {
785         struct spinand_device *spinand = mtd_to_spinand(mtd);
786         struct nand_device *nand = mtd_to_nanddev(mtd);
787         struct nand_pos pos;
788         int ret;
789
790         nanddev_offs_to_pos(nand, offs, &pos);
791         mutex_lock(&spinand->lock);
792         ret = nanddev_isreserved(nand, &pos);
793         mutex_unlock(&spinand->lock);
794
795         return ret;
796 }
797
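/*
 * Create the write and read direct mappings for one plane. Each mapping
 * covers a full page plus its OOB area and is derived from the selected
 * cache op templates.
 */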
798 static int spinand_create_dirmap(struct spinand_device *spinand,
799                                  unsigned int plane)
800 {
801         struct nand_device *nand = spinand_to_nand(spinand);
802         struct spi_mem_dirmap_info info = {
803                 .length = nanddev_page_size(nand) +
804                           nanddev_per_page_oobsize(nand),
805         };
806         struct spi_mem_dirmap_desc *desc;
807
808         /* The plane number is passed in MSB just above the column address */
809         info.offset = plane << fls(nand->memorg.pagesize);
810
811         info.op_tmpl = *spinand->op_templates.update_cache;
812         desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
813                                           spinand->spimem, &info);
814         if (IS_ERR(desc))
815                 return PTR_ERR(desc);
816
817         spinand->dirmaps[plane].wdesc = desc;
818
819         info.op_tmpl = *spinand->op_templates.read_cache;
820         desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
821                                           spinand->spimem, &info);
822         if (IS_ERR(desc))
823                 return PTR_ERR(desc);
824
825         spinand->dirmaps[plane].rdesc = desc;
826
827         return 0;
828 }
829
830 static int spinand_create_dirmaps(struct spinand_device *spinand)
831 {
832         struct nand_device *nand = spinand_to_nand(spinand);
833         int i, ret;
834
835         spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
836                                         sizeof(*spinand->dirmaps) *
837                                         nand->memorg.planes_per_lun,
838                                         GFP_KERNEL);
839         if (!spinand->dirmaps)
840                 return -ENOMEM;
841
842         for (i = 0; i < nand->memorg.planes_per_lun; i++) {
843                 ret = spinand_create_dirmap(spinand, i);
844                 if (ret)
845                         return ret;
846         }
847
848         return 0;
849 }
850
851 static const struct nand_ops spinand_ops = {
852         .erase = spinand_erase,
853         .markbad = spinand_markbad,
854         .isbad = spinand_isbad,
855 };
856
857 static const struct spinand_manufacturer *spinand_manufacturers[] = {
858         &gigadevice_spinand_manufacturer,
859         &macronix_spinand_manufacturer,
860         &micron_spinand_manufacturer,
861         &paragon_spinand_manufacturer,
862         &toshiba_spinand_manufacturer,
863         &winbond_spinand_manufacturer,
864 };
865
866 static int spinand_manufacturer_match(struct spinand_device *spinand,
867                                       enum spinand_readid_method rdid_method)
868 {
869         u8 *id = spinand->id.data;
870         unsigned int i;
871         int ret;
872
873         for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
874                 const struct spinand_manufacturer *manufacturer =
875                         spinand_manufacturers[i];
876
877                 if (id[0] != manufacturer->id)
878                         continue;
879
880                 ret = spinand_match_and_init(spinand,
881                                              manufacturer->chips,
882                                              manufacturer->nchips,
883                                              rdid_method);
884                 if (ret < 0)
885                         continue;
886
887                 spinand->manufacturer = manufacturer;
888                 return 0;
889         }
890         return -ENOTSUPP;
891 }
892
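/*
 * The READ_ID layout is not standardized across SPI NAND vendors, so try the
 * three known variants in turn (opcode only, opcode + address byte, opcode +
 * dummy byte) and let the manufacturer tables do the matching.
 */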
893 static int spinand_id_detect(struct spinand_device *spinand)
894 {
895         u8 *id = spinand->id.data;
896         int ret;
897
898         ret = spinand_read_id_op(spinand, 0, 0, id);
899         if (ret)
900                 return ret;
901         ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
902         if (!ret)
903                 return 0;
904
905         ret = spinand_read_id_op(spinand, 1, 0, id);
906         if (ret)
907                 return ret;
908         ret = spinand_manufacturer_match(spinand,
909                                          SPINAND_READID_METHOD_OPCODE_ADDR);
910         if (!ret)
911                 return 0;
912
913         ret = spinand_read_id_op(spinand, 0, 1, id);
914         if (ret)
915                 return ret;
916         ret = spinand_manufacturer_match(spinand,
917                                          SPINAND_READID_METHOD_OPCODE_DUMMY);
918
919         return ret;
920 }
921
922 static int spinand_manufacturer_init(struct spinand_device *spinand)
923 {
924         if (spinand->manufacturer->ops->init)
925                 return spinand->manufacturer->ops->init(spinand);
926
927         return 0;
928 }
929
930 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
931 {
932         /* Release manufacturer private data */
933         if (spinand->manufacturer->ops->cleanup)
934                 return spinand->manufacturer->ops->cleanup(spinand);
935 }
936
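/*
 * Pick the first op variant that the controller supports and that can cover
 * a full page + OOB transfer, possibly split into several smaller chunks by
 * spi_mem_adjust_op_size().
 */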
937 static const struct spi_mem_op *
938 spinand_select_op_variant(struct spinand_device *spinand,
939                           const struct spinand_op_variants *variants)
940 {
941         struct nand_device *nand = spinand_to_nand(spinand);
942         unsigned int i;
943
944         for (i = 0; i < variants->nops; i++) {
945                 struct spi_mem_op op = variants->ops[i];
946                 unsigned int nbytes;
947                 int ret;
948
949                 nbytes = nanddev_per_page_oobsize(nand) +
950                          nanddev_page_size(nand);
951
952                 while (nbytes) {
953                         op.data.nbytes = nbytes;
954                         ret = spi_mem_adjust_op_size(spinand->spimem, &op);
955                         if (ret)
956                                 break;
957
958                         if (!spi_mem_supports_op(spinand->spimem, &op))
959                                 break;
960
961                         nbytes -= op.data.nbytes;
962                 }
963
964                 if (!nbytes)
965                         return &variants->ops[i];
966         }
967
968         return NULL;
969 }
970
971 /**
972  * spinand_match_and_init() - Try to find a match between a device ID and an
973  *                            entry in a spinand_info table
974  * @spinand: SPI NAND object
975  * @table: SPI NAND device description table
976  * @table_size: size of the device description table
977  * @rdid_method: read id method to match
978  *
979  * Match between a device ID retrieved through the READ_ID command and an
980  * entry in the SPI NAND description table. If a match is found, the spinand
981  * object will be initialized with information provided by the matching
982  * spinand_info entry.
983  *
984  * Return: 0 on success, a negative error code otherwise.
985  */
986 int spinand_match_and_init(struct spinand_device *spinand,
987                            const struct spinand_info *table,
988                            unsigned int table_size,
989                            enum spinand_readid_method rdid_method)
990 {
991         u8 *id = spinand->id.data;
992         struct nand_device *nand = spinand_to_nand(spinand);
993         unsigned int i;
994
995         for (i = 0; i < table_size; i++) {
996                 const struct spinand_info *info = &table[i];
997                 const struct spi_mem_op *op;
998
999                 if (rdid_method != info->devid.method)
1000                         continue;
1001
1002                 if (memcmp(id + 1, info->devid.id, info->devid.len))
1003                         continue;
1004
1005                 nand->memorg = table[i].memorg;
1006                 nanddev_set_ecc_requirements(nand, &table[i].eccreq);
1007                 spinand->eccinfo = table[i].eccinfo;
1008                 spinand->flags = table[i].flags;
1009                 spinand->id.len = 1 + table[i].devid.len;
1010                 spinand->select_target = table[i].select_target;
1011
1012                 op = spinand_select_op_variant(spinand,
1013                                                info->op_variants.read_cache);
1014                 if (!op)
1015                         return -ENOTSUPP;
1016
1017                 spinand->op_templates.read_cache = op;
1018
1019                 op = spinand_select_op_variant(spinand,
1020                                                info->op_variants.write_cache);
1021                 if (!op)
1022                         return -ENOTSUPP;
1023
1024                 spinand->op_templates.write_cache = op;
1025
1026                 op = spinand_select_op_variant(spinand,
1027                                                info->op_variants.update_cache);
1028                 spinand->op_templates.update_cache = op;
1029
1030                 return 0;
1031         }
1032
1033         return -ENOTSUPP;
1034 }
1035
1036 static int spinand_detect(struct spinand_device *spinand)
1037 {
1038         struct device *dev = &spinand->spimem->spi->dev;
1039         struct nand_device *nand = spinand_to_nand(spinand);
1040         int ret;
1041
1042         ret = spinand_reset_op(spinand);
1043         if (ret)
1044                 return ret;
1045
1046         ret = spinand_id_detect(spinand);
1047         if (ret) {
1048                 dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
1049                         spinand->id.data);
1050                 return ret;
1051         }
1052
1053         if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1054                 dev_err(dev,
1055                         "SPI NANDs with more than one die must implement ->select_target()\n");
1056                 return -EINVAL;
1057         }
1058
1059         dev_info(&spinand->spimem->spi->dev,
1060                  "%s SPI NAND was found.\n", spinand->manufacturer->name);
1061         dev_info(&spinand->spimem->spi->dev,
1062                  "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1063                  nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1064                  nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1065
1066         return 0;
1067 }
1068
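/*
 * Bring the chip to a usable state: identify it, allocate the DMA-able
 * page/OOB buffers, initialize the config cache, disable OTP mode, create
 * the direct mappings, unlock all blocks and register the on-die ECC engine
 * as the default.
 */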
1069 static int spinand_init(struct spinand_device *spinand)
1070 {
1071         struct device *dev = &spinand->spimem->spi->dev;
1072         struct mtd_info *mtd = spinand_to_mtd(spinand);
1073         struct nand_device *nand = mtd_to_nanddev(mtd);
1074         int ret, i;
1075
1076         /*
1077          * We need a scratch buffer because the spi_mem interface requires that
1078          * buf passed in spi_mem_op->data.buf be DMA-able.
1079          */
1080         spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1081         if (!spinand->scratchbuf)
1082                 return -ENOMEM;
1083
1084         ret = spinand_detect(spinand);
1085         if (ret)
1086                 goto err_free_bufs;
1087
1088         /*
1089          * Use kzalloc() instead of devm_kzalloc() here, because some drivers
1090          * may use this buffer for DMA access.
1091          * Memory allocated by devm_ does not guarantee DMA-safe alignment.
1092          */
1093         spinand->databuf = kzalloc(nanddev_page_size(nand) +
1094                                nanddev_per_page_oobsize(nand),
1095                                GFP_KERNEL);
1096         if (!spinand->databuf) {
1097                 ret = -ENOMEM;
1098                 goto err_free_bufs;
1099         }
1100
1101         spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1102
1103         ret = spinand_init_cfg_cache(spinand);
1104         if (ret)
1105                 goto err_free_bufs;
1106
1107         ret = spinand_init_quad_enable(spinand);
1108         if (ret)
1109                 goto err_free_bufs;
1110
1111         ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1112         if (ret)
1113                 goto err_free_bufs;
1114
1115         ret = spinand_manufacturer_init(spinand);
1116         if (ret) {
1117                 dev_err(dev,
1118                         "Failed to initialize the SPI NAND chip (err = %d)\n",
1119                         ret);
1120                 goto err_free_bufs;
1121         }
1122
1123         ret = spinand_create_dirmaps(spinand);
1124         if (ret) {
1125                 dev_err(dev,
1126                         "Failed to create direct mappings for read/write operations (err = %d)\n",
1127                         ret);
1128                 goto err_manuf_cleanup;
1129         }
1130
1131         /* After power up, all blocks are locked, so unlock them here. */
1132         for (i = 0; i < nand->memorg.ntargets; i++) {
1133                 ret = spinand_select_target(spinand, i);
1134                 if (ret)
1135                         goto err_manuf_cleanup;
1136
1137                 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1138                 if (ret)
1139                         goto err_manuf_cleanup;
1140         }
1141
1142         ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1143         if (ret)
1144                 goto err_manuf_cleanup;
1145
1146         /* SPI-NAND default ECC engine is on-die */
1147         nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
1148         nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
1149
1150         spinand_ecc_enable(spinand, false);
1151         ret = nanddev_ecc_engine_init(nand);
1152         if (ret)
1153                 goto err_cleanup_nanddev;
1154
1155         mtd->_read_oob = spinand_mtd_read;
1156         mtd->_write_oob = spinand_mtd_write;
1157         mtd->_block_isbad = spinand_mtd_block_isbad;
1158         mtd->_block_markbad = spinand_mtd_block_markbad;
1159         mtd->_block_isreserved = spinand_mtd_block_isreserved;
1160         mtd->_erase = spinand_mtd_erase;
1161         mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1162
1163         if (nand->ecc.engine) {
1164                 ret = mtd_ooblayout_count_freebytes(mtd);
1165                 if (ret < 0)
1166                         goto err_cleanup_ecc_engine;
1167         }
1168
1169         mtd->oobavail = ret;
1170
1171         /* Propagate ECC information to mtd_info */
1172         mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
1173         mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
1174
1175         return 0;
1176
1177 err_cleanup_ecc_engine:
1178         nanddev_ecc_engine_cleanup(nand);
1179
1180 err_cleanup_nanddev:
1181         nanddev_cleanup(nand);
1182
1183 err_manuf_cleanup:
1184         spinand_manufacturer_cleanup(spinand);
1185
1186 err_free_bufs:
1187         kfree(spinand->databuf);
1188         kfree(spinand->scratchbuf);
1189         return ret;
1190 }
1191
1192 static void spinand_cleanup(struct spinand_device *spinand)
1193 {
1194         struct nand_device *nand = spinand_to_nand(spinand);
1195
1196         nanddev_cleanup(nand);
1197         spinand_manufacturer_cleanup(spinand);
1198         kfree(spinand->databuf);
1199         kfree(spinand->scratchbuf);
1200 }
1201
1202 static int spinand_probe(struct spi_mem *mem)
1203 {
1204         struct spinand_device *spinand;
1205         struct mtd_info *mtd;
1206         int ret;
1207
1208         spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1209                                GFP_KERNEL);
1210         if (!spinand)
1211                 return -ENOMEM;
1212
1213         spinand->spimem = mem;
1214         spi_mem_set_drvdata(mem, spinand);
1215         spinand_set_of_node(spinand, mem->spi->dev.of_node);
1216         mutex_init(&spinand->lock);
1217         mtd = spinand_to_mtd(spinand);
1218         mtd->dev.parent = &mem->spi->dev;
1219
1220         ret = spinand_init(spinand);
1221         if (ret)
1222                 return ret;
1223
1224         ret = mtd_device_register(mtd, NULL, 0);
1225         if (ret)
1226                 goto err_spinand_cleanup;
1227
1228         return 0;
1229
1230 err_spinand_cleanup:
1231         spinand_cleanup(spinand);
1232
1233         return ret;
1234 }
1235
1236 static int spinand_remove(struct spi_mem *mem)
1237 {
1238         struct spinand_device *spinand;
1239         struct mtd_info *mtd;
1240         int ret;
1241
1242         spinand = spi_mem_get_drvdata(mem);
1243         mtd = spinand_to_mtd(spinand);
1244
1245         ret = mtd_device_unregister(mtd);
1246         if (ret)
1247                 return ret;
1248
1249         spinand_cleanup(spinand);
1250
1251         return 0;
1252 }
1253
1254 static const struct spi_device_id spinand_ids[] = {
1255         { .name = "spi-nand" },
1256         { /* sentinel */ },
1257 };
1258
1259 #ifdef CONFIG_OF
1260 static const struct of_device_id spinand_of_ids[] = {
1261         { .compatible = "spi-nand" },
1262         { /* sentinel */ },
1263 };
1264 #endif
1265
1266 static struct spi_mem_driver spinand_drv = {
1267         .spidrv = {
1268                 .id_table = spinand_ids,
1269                 .driver = {
1270                         .name = "spi-nand",
1271                         .of_match_table = of_match_ptr(spinand_of_ids),
1272                 },
1273         },
1274         .probe = spinand_probe,
1275         .remove = spinand_remove,
1276 };
1277 module_spi_mem_driver(spinand_drv);
1278
1279 MODULE_DESCRIPTION("SPI NAND framework");
1280 MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
1281 MODULE_LICENSE("GPL v2");