// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Return: 0 on success, a negative error code otherwise.
 */
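/*
 * The function body was elided from this excerpt; a minimal
 * read-modify-write implementation consistent with the two cache
 * accessors above would look like this:
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
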
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * Use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

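/*
 * Not shown in this excerpt: the page-read path that ties the helpers
 * above together. A sketch of the sequence (load the page into the
 * on-die cache, wait for the device to become ready, drain the cache,
 * then interpret the ECC status), using the same helper names:
 */
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}
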
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * PROGRAM LOAD (AKA write cache) does not necessarily reset the cache
	 * content to 0xFF (depends on vendor implementation), so we must fill
	 * the page cache entirely even if we only want to program the data
	 * portion of the page, otherwise we might corrupt the BBM or user
	 * data previously programmed in the OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	/* Poll the status register until the device leaves the busy state. */
	do {
		ret = spinand_read_reg_op(spinand, REG_STATUS, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the busy bit has changed since our last
	 * check.
	 */
	ret = spinand_read_reg_op(spinand, REG_STATUS, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

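/*
 * Illustration (hypothetical, not part of the original file): parts with
 * finer-grained ECC reporting override the default interpretation above
 * via eccinfo.get_status. A sketch for an imaginary chip whose
 * "has bitflips" encoding means "up to 4 bits corrected":
 */
static int demo_ecc_get_status(struct spinand_device *spinand, u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	case STATUS_ECC_HAS_BITFLIPS:
		/* Hypothetical: this demo part corrects at most 4 bitflips. */
		return 4;

	default:
		break;
	}

	return -EINVAL;
}
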
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	/*
	 * Elided: write-enable the device, fill the on-die cache and issue
	 * PROGRAM EXECUTE before waiting for completion below.
	 */
	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		/* ECC is toggled per-target before the write (elided). */
		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address. */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	/*
	 * spinand_manufacturers[] (one entry per supported vendor) is
	 * elided from this excerpt.
	 */
	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	/*
	 * Elided: retry with the opcode + address byte and opcode + dummy
	 * byte READ ID variants before giving up.
	 */
	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data. */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

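/*
 * Usage note (not part of the original excerpt): chip drivers describe
 * the cache I/O operations they support as ordered lists, fastest
 * variant first, and spinand_select_op_variant() picks the first one
 * the underlying SPI controller can execute. A typical table, in the
 * style used by in-tree vendor drivers:
 *
 *	static SPINAND_OP_VARIANTS(read_cache_variants,
 *		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
 *		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
 *		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
 *		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
 */
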
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: the readid method to use
 *
 * Match between a device ID retrieved through the READID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

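/*
 * For illustration (hypothetical entry, not from the original file): the
 * description tables matched above are built with the SPINAND_INFO()
 * helper, tying a device ID to its memory organization, ECC requirements
 * and op variants:
 *
 *	SPINAND_INFO("DEMO-SPINAND-1G",
 *		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0x12),
 *		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
 *		     NAND_ECCREQ(4, 512),
 *		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 *					      &write_cache_variants,
 *					      &update_cache_variants),
 *		     0,
 *		     SPINAND_ECCINFO(&demo_ooblayout, NULL)),
 */
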
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

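/*
 * The two callbacks above are wired into an mtd_ooblayout_ops structure
 * (elided from this excerpt); the upstream pattern looks like:
 *
 *	static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
 *		.ecc = spinand_noecc_ooblayout_ecc,
 *		.free = spinand_noecc_ooblayout_free,
 *	};
 */
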
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires
	 * that the buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA purposes, and memory allocated by
	 * devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret)
		goto err_manuf_cleanup;

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info. */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		spinand_cleanup(spinand);

	return ret;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
		.id_table = spinand_ids,
	},
	.probe = spinand_probe,
	.remove = spinand_remove,	/* spinand_remove() not shown in this excerpt */
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_LICENSE("GPL v2");