// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
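
/*
 * All feature-register accesses below go through spinand->scratchbuf, a
 * small kzalloc'ed bounce buffer, because the spi-mem interface requires
 * that data buffers passed in spi_mem_op be DMA-able.
 */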
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];

	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	/* The config cache lets us skip redundant SET FEATURE operations. */
	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}
/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If chip only has one die, this function is a NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}
static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}
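
/*
 * Decode the ECC bits of the status register. Vendors that report a finer
 * bitflip count provide a ->get_status() hook; the generic decoding below
 * can only distinguish "no bitflips", "bitflips corrected" and
 * "uncorrectable error".
 */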
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
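
/*
 * On-die ECC engine glue: the chip corrects data internally during page
 * reads and only reports a summary in the status register, which
 * spinand_ondie_ecc_save_status() captures for later decoding.
 */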
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}
static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}
static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG)
		mtd->ecc_stats.failed++;
	else if (ret > 0)
		mtd->ecc_stats.corrected += ret;

	return ret;
}
static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}
static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}
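
/*
 * PAGE READ (13h) moves the addressed page from the NAND array into the
 * chip's internal cache; the data is then fetched over the bus by
 * spinand_read_from_cache_op().
 */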
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
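
/*
 * Transfers from the chip's cache go through the per-plane dirmap
 * descriptor. spi_mem_dirmap_read() may complete only part of the
 * requested transfer, hence the retry loop below.
 */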
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}
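
/* PROGRAM EXECUTE (10h): commit the cache content to the NAND array. */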
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
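
/*
 * Poll the status register until the chip deasserts STATUS_BUSY.
 * spi_mem_poll_status() lets capable controllers offload the polling;
 * the initial and poll delays are tuned per operation type
 * (reset/read/write/erase).
 */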
static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
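
/*
 * Read path: load the page into the chip's cache, wait for readiness,
 * snapshot the ECC status, then transfer the cache content and let the
 * ECC engine finish the request.
 */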
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
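
/*
 * Write path: WRITE ENABLE, PROGRAM LOAD into the cache, PROGRAM EXECUTE,
 * then wait and check STATUS_PROG_FAILED.
 */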
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			max_bitflips = max_t(unsigned int, max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}
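
/*
 * Bad-block marker (BBM) handling: a block is considered bad when the
 * first two OOB bytes of its first page are not 0xFF. Raw mode is used so
 * the check is not disturbed by ECC.
 */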
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}
static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};
static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}

	return -ENOTSUPP;
}
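
/*
 * Identify the chip by trying the three READ_ID variants seen in the wild:
 * opcode only, opcode + address byte, and opcode + dummy byte.
 */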
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}
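
/*
 * Pick the first variant that the controller supports and that can cover a
 * full page + OOB transfer, possibly split across several operations.
 * Variant tables list the most efficient (widest) options first, so the
 * first usable entry wins.
 */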
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}
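
/*
 * Bring the chip to a known state: populate the config cache, enable quad
 * mode when needed, leave OTP mode, run manufacturer-specific init, and
 * unlock all blocks (they power up locked).
 */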
static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}
static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}
static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}
static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}
static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}
static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");