// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

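/*
 * Example (illustrative only): callers typically use spinand_upd_cfg() for a
 * read-modify-write of a single CFG field, e.g. to clear the OTP bit:
 *
 *	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
 *
 * Only the bits set in @mask are touched; everything else in the cached
 * configuration register value is preserved.
 */
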
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

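/*
 * Example (illustrative only): any per-target register access must be
 * preceded by a target selection, as done in spinand_init_cfg_cache() below:
 *
 *	ret = spinand_select_target(spinand, target);
 *	if (ret)
 *		return ret;
 *	ret = spinand_read_reg_op(spinand, REG_CFG,
 *				  &spinand->cfg_cache[target]);
 */
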
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

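/*
 * Sketch (assumption, not from this file): chips with finer-grained ECC
 * reporting can override the generic decoding above by providing a
 * ->get_status() hook in their spinand_info eccinfo, along these lines:
 *
 *	static int mychip_ecc_get_status(struct spinand_device *spinand,
 *					 u8 status)
 *	{
 *		switch (status & STATUS_ECC_MASK) {
 *		case STATUS_ECC_NO_BITFLIPS:
 *			return 0;
 *		case STATUS_ECC_UNCOR_ERROR:
 *			return -EBADMSG;
 *		default:
 *			return 4;	// e.g. vendor status means "<= 4 bitflips"
 *		}
 *	}
 *
 * mychip_ecc_get_status() and the 4-bitflip mapping are hypothetical; the
 * real decodings live in the per-manufacturer drivers.
 */
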
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	return spinand_check_ecc_status(spinand, engine_conf->status);
}

static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen)
		memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
		       req->ooblen);

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * the ECC engine's ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

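/*
 * Worked example (illustrative only): with a 2048-byte page,
 * fls(2048) == 12, so the descriptor for plane 1 covers direct-mapping
 * offsets starting at 1 << 12 = 0x1000. The plane bit therefore lands just
 * above the column address bits, which is where multi-plane chips expect it.
 */
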
static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

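/*
 * Note: the three probes above correspond to the three READ_ID wire formats
 * found in the wild: opcode only, opcode + one address byte, and opcode +
 * one dummy byte. Each probe re-reads the raw ID bytes, so a shifted ID
 * returned by a previous method cannot leak into the next match attempt.
 */
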
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

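/*
 * Sketch (assumption, not from this file): the variants walked above are
 * declared in per-chip drivers with the SPINAND_OP_VARIANTS() macro, ordered
 * from fastest to slowest, e.g.:
 *
 *	static SPINAND_OP_VARIANTS(read_cache_variants,
 *		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
 *		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0));
 *
 * spinand_select_op_variant() then picks the first entry the controller can
 * actually execute for a full page + OOB transfer.
 */
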
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

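/*
 * Sketch (assumption, not from this file): entries in @table come from the
 * per-manufacturer description tables built with the SPINAND_INFO() macro,
 * roughly:
 *
 *	SPINAND_INFO("MT29F2G01ABAGD",
 *		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
 *		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
 *		     NAND_ECCREQ(8, 512),
 *		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 *					      &write_cache_variants,
 *					      &update_cache_variants),
 *		     0,
 *		     SPINAND_ECCINFO(&ooblayout, ecc_get_status)),
 *
 * The exact macro arguments vary across kernel versions; see the
 * per-manufacturer drivers for authoritative examples.
 */
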
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	/*
	 * The OOB area available to the user depends on the ECC engine's
	 * OOB layout, hence the mtd_ooblayout_count_freebytes() call below.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");