// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

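/*
 * Minimal sketch (not part of the fragment above): callers wrap these
 * accessors for specific registers. REG_STATUS (0xc0) comes from
 * <linux/mtd/spinand.h>; this helper mirrors the pattern the driver uses
 * and is referenced by the wait/continuous-read paths below.
 */
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}
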
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encompassing all bits to modify
 * @val: the new value to apply
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

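/*
 * A minimal usage sketch, assuming the CFG_ECC_ENABLE bit from
 * <linux/mtd/spinand.h>: toggling on-die ECC is a single masked update of
 * the configuration register. The ECC I/O paths below rely on a helper of
 * this shape.
 */
static int spinand_ecc_enable(struct spinand_device *spinand, bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}
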
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

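/*
 * Hypothetical example of the ->select_target() hook a multi-die vendor
 * driver might provide. The feature-register address (0xd0) and the die
 * select bit below are made up for illustration; only the
 * SPINAND_SET_FEATURE_OP() building block is real.
 */
static int example_select_target(struct spinand_device *spinand,
				 unsigned int target)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(0xd0,
						      spinand->scratchbuf);

	/* Hypothetical layout: one die-select bit in a vendor register. */
	*spinand->scratchbuf = target ? BIT(6) : 0;
	return spi_mem_exec_op(spinand->spimem, &op);
}
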
static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_cont_read_enable(struct spinand_device *spinand,
				    bool enable)
{
	return spinand->set_cont_read(spinand, enable);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

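/*
 * Illustrative sketch only: how a vendor driver might provide a
 * finer-grained eccinfo.get_status() hook than the generic fallback above.
 * The 3-bit bitflip-count field in bits 6:4 is hypothetical; real chips
 * document their own extended ECC status encoding.
 */
static int example_get_ecc_status(struct spinand_device *spinand, u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;
	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;
	default:
		/* Hypothetical: the exact number of corrected bitflips. */
		return (status & GENMASK(6, 4)) >> 4;
	}
}
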
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

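/*
 * Glue restored from context (upstream defines it at this point): the two
 * callbacks above form the fallback OOB layout installed when a chip
 * provides no ECC-aware layout of its own.
 */
static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
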
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
		return -EOPNOTSUPP;

	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG) {
		mtd->ecc_stats.failed++;
	} else if (ret > 0) {
		unsigned int pages;

		/*
		 * Continuous reads don't allow us to get the detail per page,
		 * so we may exaggerate the actual number of corrected
		 * bitflips.
		 */
		if (!req->continuous)
			pages = 1;
		else
			pages = req->datalen / nanddev_page_size(nand);

		mtd->ecc_stats.corrected += ret * pages;
	}

	return ret;
}

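/*
 * Glue restored from context (upstream declares these here): the four
 * callbacks above are tied together as the on-die ECC engine that
 * spinand_init() later installs as the default engine.
 */
static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};
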
static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		if (!req->continuous)
			nbytes = nanddev_page_size(nand);
		else
			nbytes = round_up(req->dataoffs + req->datalen,
					  nanddev_page_size(nand));
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	if (req->mode == MTD_OPS_RAW)
		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
	else
		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;

		/*
		 * Dirmap accesses are allowed to toggle the CS.
		 * Toggling the CS during a continuous read is forbidden.
		 */
		if (nbytes && req->continuous)
			return -EIO;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	if (req->mode == MTD_OPS_RAW)
		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
	else
		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

/**
 * spinand_wait() - Poll memory device status
 * @spinand: the spinand device
 * @initial_delay_us: delay in us before starting to poll
 * @poll_delay_us: time to sleep between reads in us
 * @s: the pointer to variable to store the value of REG_STATUS
 *
 * This function polls a status register (REG_STATUS) and returns when
 * the status becomes ready.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
		 unsigned long poll_delay_us, u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

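/*
 * Usage sketch, assuming the SPINAND_*_INITIAL_DELAY_US/_POLL_DELAY_US
 * constants from <linux/mtd/spinand.h>: callers issue an operation, poll
 * until the busy bit clears, then inspect the operation-specific failure
 * bit, e.g. for a block erase:
 *
 *	ret = spinand_wait(spinand,
 *			   SPINAND_ERASE_INITIAL_DELAY_US,
 *			   SPINAND_ERASE_POLL_DELAY_US,
 *			   &status);
 *	if (!ret && (status & STATUS_ERASE_FAILED))
 *		ret = -EIO;
 */
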
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

/**
 * spinand_read_page() - Read a page
 * @spinand: the spinand device
 * @req: the I/O request
 *
 * Return: 0 or a positive number of bitflips corrected on success.
 * A negative error code otherwise.
 */
int spinand_read_page(struct spinand_device *spinand,
		      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

/**
 * spinand_write_page() - Write a page
 * @spinand: the spinand device
 * @req: the I/O request
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_write_page(struct spinand_device *spinand,
		       const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
					 struct mtd_oob_ops *ops,
					 unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mtd_ecc_stats old_stats;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	unsigned int retry_mode = 0;
	int ret;

	old_stats = mtd->ecc_stats;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

read_retry:
		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG && spinand->set_read_retry) {
			if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
				ret = spinand->set_read_retry(spinand, retry_mode);
				if (ret < 0) {
					spinand->set_read_retry(spinand, 0);
					return ret;
				}

				/* Reset ecc_stats; retry */
				mtd->ecc_stats = old_stats;
				goto read_retry;
			} else {
				/* No more retry modes; real failure */
				ecc_failed = true;
			}
		} else if (ret == -EBADMSG) {
			ecc_failed = true;
		} else {
			*max_bitflips = max(*max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			retry_mode = 0;
			ret = spinand->set_read_retry(spinand, retry_mode);
			if (ret < 0)
				return ret;
		}
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret;
}

static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
					    struct mtd_oob_ops *ops,
					    unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	u8 status;
	int ret;

	ret = spinand_cont_read_enable(spinand, true);
	if (ret)
		return ret;

	/*
	 * The cache is divided into two halves. While one half of the cache
	 * has the requested data, the other half is loaded with the next
	 * chunk of data. Therefore, the host can read out the data
	 * continuously from page to page. Each data read must be a multiple
	 * of 4-bytes and full pages should be read; otherwise, the data
	 * output might get out of sequence from one read command to another.
	 */
	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			goto end_cont_read;

		ret = nand_ecc_prepare_io_req(nand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_load_page_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
				   SPINAND_READ_POLL_DELAY_US, NULL);
		if (ret < 0)
			goto end_cont_read;

		ret = spinand_read_from_cache_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ops->retlen += iter.req.datalen;

		ret = spinand_read_status(spinand, &status);
		if (ret)
			goto end_cont_read;

		spinand_ondie_ecc_save_status(nand, status);

		ret = nand_ecc_finish_io_req(nand, &iter.req);
		if (ret < 0)
			goto end_cont_read;

		*max_bitflips = max(*max_bitflips, ret);
		ret = 0;
	}

end_cont_read:
	/*
	 * Once all the data has been read out, the host can either pull CS#
	 * high and wait for tRST or manually clear the bit in the
	 * configuration register to terminate the continuous read operation.
	 * We have no guarantee the SPI controller drivers will effectively
	 * deassert the CS when we expect them to, so take the register-based
	 * approach here.
	 */
	spinand_cont_read_enable(spinand, false);

	return ret;
}

static void spinand_cont_read_init(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;

	/* OOBs cannot be retrieved so external/on-host ECC engine won't work */
	if (spinand->set_cont_read &&
	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
		spinand->cont_read_possible = true;
	}
}

static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos start_pos, end_pos;

	if (!spinand->cont_read_possible)
		return false;

	/* OOBs won't be retrieved */
	if (ops->ooblen || ops->oobbuf)
		return false;

	nanddev_offs_to_pos(nand, from, &start_pos);
	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);

	/*
	 * Continuous reads never cross LUN boundaries. Some devices don't
	 * support crossing planes boundaries. Some devices don't even support
	 * crossing blocks boundaries. The common case being to read through
	 * UBI, we will very rarely read two consequent blocks or more, so it
	 * is safer and easier (can be improved) to only enable continuous
	 * reads when reading within the same erase block.
	 */
	if (start_pos.target != end_pos.target ||
	    start_pos.plane != end_pos.plane ||
	    start_pos.eraseblock != end_pos.eraseblock)
		return false;

	return start_pos.page < end_pos.page;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	old_stats = mtd->ecc_stats;

	if (spinand_use_cont_read(mtd, from, ops))
		ret = spinand_mtd_continuous_page_read(mtd, from, ops,
						       &max_bitflips);
	else
		ret = spinand_mtd_regular_page_read(mtd, from, ops,
						    &max_bitflips);

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	spinand_select_target(spinand, pos->target);

	ret = spinand_read_page(spinand, &req);
	if (ret == -EOPNOTSUPP) {
		/* Retry with ECC in case raw access is not supported */
		req.mode = MTD_OPS_PLACE_OOB;
		spinand_read_page(spinand, &req);
	}

	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_page(spinand, &req);
	if (ret == -EOPNOTSUPP) {
		/* Retry with ECC in case raw access is not supported */
		req.mode = MTD_OPS_PLACE_OOB;
		ret = spinand_write_page(spinand, &req);
	}

	return ret;
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

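/*
 * Glue restored from context (upstream defines it at this point): the
 * generic NAND layer drives the three helpers above through this ops table,
 * which spinand_init() passes to nanddev_init().
 */
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};
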
static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	if (spinand->cont_read_possible)
		info.length = nanddev_eraseblock_size(nand);

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

		return 0;
	}

	info.op_tmpl = *spinand->op_templates.update_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc_ecc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc_ecc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}

	return -EOPNOTSUPP;
}

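/*
 * Illustrative sketch of a manufacturer descriptor as vendor drivers define
 * them (the entries of the spinand_manufacturers[] array iterated above).
 * The name, ID byte, and the example_* symbols are hypothetical.
 */
static const struct spinand_manufacturer example_spinand_manufacturer = {
	.id = 0xab,			/* hypothetical first READID byte */
	.name = "Example",
	.chips = example_spinand_table,	/* hypothetical spinand_info table */
	.nchips = ARRAY_SIZE(example_spinand_table),
	.ops = &example_spinand_manuf_ops,
};
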
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	const struct spi_mem_op *best_variant = NULL;
	u64 best_op_duration_ns = ULLONG_MAX;
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		u64 op_duration_ns = 0;
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			spi_mem_adjust_op_freq(spinand->spimem, &op);

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;

			op_duration_ns += spi_mem_calc_op_duration(&op);
		}

		if (!nbytes && op_duration_ns < best_op_duration_ns) {
			best_op_duration_ns = op_duration_ns;
			best_variant = &variants->ops[i];
		}
	}

	return best_variant;
}

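/*
 * Illustrative sketch of the variant tables the selector above consumes, in
 * the pattern vendor drivers use: each table lists cache operations from
 * fastest to slowest. The macro names below are the classic ones from
 * <linux/mtd/spinand.h>; recent kernels have renamed some of them, so treat
 * these as an assumption rather than the current spelling.
 */
static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));
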
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a SPI NAND device description table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;
		spinand->set_cont_read = table[i].set_cont_read;
		spinand->fact_otp = &table[i].fact_otp;
		spinand->user_otp = &table[i].user_otp;
		spinand->read_retries = table[i].read_retries;
		spinand->set_read_retry = table[i].set_read_retry;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

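/*
 * Illustrative sketch of a description-table entry as vendor drivers write
 * them, tying together the variant tables above. The device name, ID byte
 * and geometry are hypothetical; the SPINAND_INFO()/NAND_MEMORG()/
 * NAND_ECCREQ()/SPINAND_ECCINFO() macros come from <linux/mtd/spinand.h>.
 */
static const struct spinand_info example_spinand_table[] = {
	SPINAND_INFO("EXAMPLE1G",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0x12),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(NULL, NULL)),
};
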
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	/*
	 * Continuous read can only be enabled with an on-die ECC engine, so
	 * the ECC initialization must have happened previously.
	 */
	spinand_cont_read_init(spinand);

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_cleanup_ecc_engine;
	}

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
		.id_table = spinand_ids,
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");