Lines matching defs:spinand (drivers/mtd/nand/spi/core.c, the SPI NAND core)
Each hit below carries its line number in core.c; only the matching fragment of each source line is shown.

16 #include <linux/mtd/spinand.h>
23 int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
26 spinand->scratchbuf);
29 ret = spi_mem_exec_op(spinand->spimem, &op);
33 *val = *spinand->scratchbuf;
37 int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
40 spinand->scratchbuf);
42 *spinand->scratchbuf = val;
43 return spi_mem_exec_op(spinand->spimem, &op);
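Both register helpers stage the byte in spinand->scratchbuf, the DMA-safe heap buffer allocated at line 1497, so callers may pass plain stack variables. A minimal sketch of composing them into a read-modify-write; example_set_reg_bits() is hypothetical, not a core.c symbol:

static int example_set_reg_bits(struct spinand_device *spinand, u8 reg, u8 bits)
{
	u8 val;
	int ret;

	/* spinand_read_reg_op() bounces the byte through scratchbuf,
	 * so a stack variable is safe here.
	 */
	ret = spinand_read_reg_op(spinand, reg, &val);
	if (ret)
		return ret;

	return spinand_write_reg_op(spinand, reg, val | bits);
}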
46 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
48 return spinand_read_reg_op(spinand, REG_STATUS, status);
51 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
53 struct nand_device *nand = spinand_to_nand(spinand);
55 if (WARN_ON(spinand->cur_target < 0 ||
56 spinand->cur_target >= nand->memorg.ntargets))
59 *cfg = spinand->cfg_cache[spinand->cur_target];
63 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
65 struct nand_device *nand = spinand_to_nand(spinand);
68 if (WARN_ON(spinand->cur_target < 0 ||
69 spinand->cur_target >= nand->memorg.ntargets))
72 if (spinand->cfg_cache[spinand->cur_target] == cfg)
75 ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
79 spinand->cfg_cache[spinand->cur_target] = cfg;
85 * @spinand: the spinand device
93 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
98 ret = spinand_get_cfg(spinand, &cfg);
105 return spinand_set_cfg(spinand, cfg);
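spinand_upd_cfg() is the masked read-modify-write over the per-target CFG cache; spinand_set_cfg() (line 72) skips the bus write when the cached value already matches. A hypothetical caller, mirroring the CFG_OTP_ENABLE use at line 1441:

static int example_otp_access(struct spinand_device *spinand, bool enter)
{
	/* Set or clear CFG_OTP_ENABLE; redundant writes are absorbed
	 * by the cfg_cache comparison in spinand_set_cfg().
	 */
	return spinand_upd_cfg(spinand, CFG_OTP_ENABLE,
			       enter ? CFG_OTP_ENABLE : 0);
}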
110 * @spinand: the spinand device
117 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
119 struct nand_device *nand = spinand_to_nand(spinand);
125 if (spinand->cur_target == target)
129 spinand->cur_target = target;
133 ret = spinand->select_target(spinand, target);
137 spinand->cur_target = target;
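spinand_select_target() returns immediately when the requested die is already current, so per-die loops can call it unconditionally. A sketch of the usual iteration pattern (hypothetical helper; compare spinand_read_cfg() just below):

static int example_for_each_target(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		/* Cheap no-op if the die is already selected. */
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/* ... per-die work goes here ... */
	}

	return 0;
}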
141 static int spinand_read_cfg(struct spinand_device *spinand)
143 struct nand_device *nand = spinand_to_nand(spinand);
148 ret = spinand_select_target(spinand, target);
156 ret = spinand_read_reg_op(spinand, REG_CFG,
157 &spinand->cfg_cache[target]);
165 static int spinand_init_cfg_cache(struct spinand_device *spinand)
167 struct nand_device *nand = spinand_to_nand(spinand);
168 struct device *dev = &spinand->spimem->spi->dev;
170 spinand->cfg_cache = devm_kcalloc(dev,
172 sizeof(*spinand->cfg_cache),
174 if (!spinand->cfg_cache)
180 static int spinand_init_quad_enable(struct spinand_device *spinand)
184 if (!(spinand->flags & SPINAND_HAS_QE_BIT))
187 if (spinand->op_templates.read_cache->data.buswidth == 4 ||
188 spinand->op_templates.write_cache->data.buswidth == 4 ||
189 spinand->op_templates.update_cache->data.buswidth == 4)
192 return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
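Reading the fragments together, the quad-enable policy is: do nothing unless the chip advertises SPINAND_HAS_QE_BIT, then set CFG_QUAD_ENABLE exactly when one of the chosen cache-op templates drives its data phase on four lines. A condensed restatement, not new behavior:

static int example_init_quad_enable(struct spinand_device *spinand)
{
	bool use_quad;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	/* QE is needed only if a selected cache op is x4 on data. */
	use_quad = spinand->op_templates.read_cache->data.buswidth == 4 ||
		   spinand->op_templates.write_cache->data.buswidth == 4 ||
		   spinand->op_templates.update_cache->data.buswidth == 4;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       use_quad ? CFG_QUAD_ENABLE : 0);
}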
196 static int spinand_ecc_enable(struct spinand_device *spinand,
199 return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
203 static int spinand_cont_read_enable(struct spinand_device *spinand,
206 return spinand->set_cont_read(spinand, enable);
209 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
211 struct nand_device *nand = spinand_to_nand(spinand);
213 if (spinand->eccinfo.get_status)
214 return spinand->eccinfo.get_status(spinand, status);
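Vendors whose parts report corrected-bitflip counts install eccinfo.get_status(); otherwise the core decodes the generic two-bit STATUS_ECC field. The generic decode looks roughly like this (a sketch using the STATUS_ECC_* masks from linux/mtd/spinand.h):

static int example_decode_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/* No precise count available: report the ECC strength
		 * so MTD treats the block as worth scrubbing.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}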
264 struct spinand_device *spinand = nand_to_spinand(nand);
278 if (spinand->eccinfo.ooblayout)
279 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
294 struct spinand_device *spinand = nand_to_spinand(nand);
297 if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
300 memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
303 return spinand_ecc_enable(spinand, enable);
310 struct spinand_device *spinand = nand_to_spinand(nand);
311 struct mtd_info *mtd = spinand_to_mtd(spinand);
322 ret = spinand_check_ecc_status(spinand, engine_conf->status);
363 int spinand_write_enable_op(struct spinand_device *spinand)
367 return spi_mem_exec_op(spinand->spimem, &op);
370 static int spinand_load_page_op(struct spinand_device *spinand,
373 struct nand_device *nand = spinand_to_nand(spinand);
377 return spi_mem_exec_op(spinand->spimem, &op);
380 static int spinand_read_from_cache_op(struct spinand_device *spinand,
383 struct nand_device *nand = spinand_to_nand(spinand);
384 struct mtd_info *mtd = spinand_to_mtd(spinand);
392 buf = spinand->databuf;
404 buf = spinand->oobbuf;
410 rdesc = spinand->dirmaps[req->pos.plane].rdesc;
412 rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
414 if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
438 memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
444 spinand->oobbuf,
448 memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
455 static int spinand_write_to_cache_op(struct spinand_device *spinand,
458 struct nand_device *nand = spinand_to_nand(spinand);
459 struct mtd_info *mtd = spinand_to_mtd(spinand);
462 void *buf = spinand->databuf;
476 memset(spinand->databuf, 0xff, nanddev_page_size(nand));
479 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
485 spinand->oobbuf,
489 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
494 wdesc = spinand->dirmaps[req->pos.plane].wdesc;
496 wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
498 if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
517 static int spinand_program_op(struct spinand_device *spinand,
520 struct nand_device *nand = spinand_to_nand(spinand);
524 return spi_mem_exec_op(spinand->spimem, &op);
527 static int spinand_erase_op(struct spinand_device *spinand,
530 struct nand_device *nand = spinand_to_nand(spinand);
534 return spi_mem_exec_op(spinand->spimem, &op);
539 * @spinand: the spinand device
549 int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
553 spinand->scratchbuf);
557 ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
564 status = *spinand->scratchbuf;
572 ret = spinand_read_status(spinand, &status);
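spinand_wait() first asks the controller to poll in hardware via spi_mem_poll_status() and falls back to reading the status register by hand (line 572). A typical caller, assuming the SPINAND_WRITE_* delay hints from linux/mtd/spinand.h (hypothetical helper):

static int example_wait_prog_done(struct spinand_device *spinand)
{
	u8 status;
	int ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (ret)
		return ret;

	/* BUSY has cleared; now check the operation-specific fail bit. */
	return (status & STATUS_PROG_FAILED) ? -EIO : 0;
}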
583 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
587 naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
590 ret = spi_mem_exec_op(spinand->spimem, &op);
592 memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
597 static int spinand_reset_op(struct spinand_device *spinand)
602 ret = spi_mem_exec_op(spinand->spimem, &op);
606 return spinand_wait(spinand,
612 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
614 return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
619 * @spinand: the spinand device
625 int spinand_read_page(struct spinand_device *spinand,
628 struct nand_device *nand = spinand_to_nand(spinand);
636 ret = spinand_load_page_op(spinand, req);
640 ret = spinand_wait(spinand,
649 ret = spinand_read_from_cache_op(spinand, req);
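The fragments above give the whole read path: PAGE READ moves data from the array into the on-die cache, spinand_wait() rides out tR, then READ FROM CACHE pulls the data to the host. Condensed, as it would appear inside core.c (ECC-engine prepare/finish and error paths trimmed):

static int example_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);	/* array -> cache */
	if (ret)
		return ret;

	ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US, &status);
	if (ret)
		return ret;

	return spinand_read_from_cache_op(spinand, req); /* cache -> host */
}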
658 * @spinand: the spinand device
664 int spinand_write_page(struct spinand_device *spinand,
667 struct nand_device *nand = spinand_to_nand(spinand);
675 ret = spinand_write_enable_op(spinand);
679 ret = spinand_write_to_cache_op(spinand, req);
683 ret = spinand_program_op(spinand, req);
687 ret = spinand_wait(spinand,
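The write path mirrors it, with WRITE ENABLE required before the program cycle. Condensed in the same spirit (delay constant names assumed from linux/mtd/spinand.h):

static int example_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);		/* WREN */
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);	/* host -> cache */
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);		/* PROGRAM EXECUTE */
	if (ret)
		return ret;

	ret = spinand_wait(spinand, SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}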
704 struct spinand_device *spinand = mtd_to_spinand(mtd);
722 ret = spinand_select_target(spinand, iter.req.pos.target);
727 ret = spinand_read_page(spinand, &iter.req);
731 if (ret == -EBADMSG && spinand->set_read_retry) {
732 if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
733 ret = spinand->set_read_retry(spinand, retry_mode);
735 spinand->set_read_retry(spinand, 0);
759 ret = spinand->set_read_retry(spinand, retry_mode);
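The fragments at lines 731-759 implement per-page read retry: on -EBADMSG the core steps through the vendor's retry modes, and it always drops back to mode 0 afterwards so later reads start clean. Schematically, inside the per-page loop (a loose sketch, not the exact control flow):

/* Retry an uncorrectable page through the vendor's read-retry modes. */
while (ret == -EBADMSG && spinand->set_read_retry &&
       retry_mode < spinand->read_retries) {
	if (spinand->set_read_retry(spinand, ++retry_mode))
		break;
	ret = spinand_read_page(spinand, &iter.req);
}
if (retry_mode)
	spinand->set_read_retry(spinand, 0);	/* back to default mode */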
775 struct spinand_device *spinand = mtd_to_spinand(mtd);
781 ret = spinand_cont_read_enable(spinand, true);
794 ret = spinand_select_target(spinand, iter.req.pos.target);
802 ret = spinand_load_page_op(spinand, &iter.req);
806 ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
811 ret = spinand_read_from_cache_op(spinand, &iter.req);
817 ret = spinand_read_status(spinand, &status);
839 spinand_cont_read_enable(spinand, false);
844 static void spinand_cont_read_init(struct spinand_device *spinand)
846 struct nand_device *nand = spinand_to_nand(spinand);
850 if (spinand->set_cont_read &&
853 spinand->cont_read_possible = true;
861 struct spinand_device *spinand = nand_to_spinand(nand);
864 if (!spinand->cont_read_possible)
893 struct spinand_device *spinand = mtd_to_spinand(mtd);
898 mutex_lock(&spinand->lock);
914 mutex_unlock(&spinand->lock);
922 struct spinand_device *spinand = mtd_to_spinand(mtd);
931 mutex_lock(&spinand->lock);
937 ret = spinand_select_target(spinand, iter.req.pos.target);
941 ret = spinand_write_page(spinand, &iter.req);
949 mutex_unlock(&spinand->lock);
956 struct spinand_device *spinand = nand_to_spinand(nand);
967 spinand_select_target(spinand, pos->target);
969 ret = spinand_read_page(spinand, &req);
973 spinand_read_page(spinand, &req);
985 struct spinand_device *spinand = nand_to_spinand(nand);
990 mutex_lock(&spinand->lock);
992 mutex_unlock(&spinand->lock);
999 struct spinand_device *spinand = nand_to_spinand(nand);
1010 ret = spinand_select_target(spinand, pos->target);
1014 ret = spinand_write_page(spinand, &req);
1018 ret = spinand_write_page(spinand, &req);
1027 struct spinand_device *spinand = nand_to_spinand(nand);
1032 mutex_lock(&spinand->lock);
1034 mutex_unlock(&spinand->lock);
1041 struct spinand_device *spinand = nand_to_spinand(nand);
1045 ret = spinand_select_target(spinand, pos->target);
1049 ret = spinand_write_enable_op(spinand);
1053 ret = spinand_erase_op(spinand, pos);
1057 ret = spinand_wait(spinand,
1071 struct spinand_device *spinand = mtd_to_spinand(mtd);
1074 mutex_lock(&spinand->lock);
1076 mutex_unlock(&spinand->lock);
1083 struct spinand_device *spinand = mtd_to_spinand(mtd);
1089 mutex_lock(&spinand->lock);
1091 mutex_unlock(&spinand->lock);
1096 static int spinand_create_dirmap(struct spinand_device *spinand,
1099 struct nand_device *nand = spinand_to_nand(spinand);
1106 if (spinand->cont_read_possible)
1112 info.op_tmpl = *spinand->op_templates.update_cache;
1113 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1114 spinand->spimem, &info);
1118 spinand->dirmaps[plane].wdesc = desc;
1120 info.op_tmpl = *spinand->op_templates.read_cache;
1121 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1122 spinand->spimem, &info);
1126 spinand->dirmaps[plane].rdesc = desc;
1129 spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
1130 spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
1135 info.op_tmpl = *spinand->op_templates.update_cache;
1137 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1138 spinand->spimem, &info);
1142 spinand->dirmaps[plane].wdesc_ecc = desc;
1144 info.op_tmpl = *spinand->op_templates.read_cache;
1146 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
1147 spinand->spimem, &info);
1151 spinand->dirmaps[plane].rdesc_ecc = desc;
1156 static int spinand_create_dirmaps(struct spinand_device *spinand)
1158 struct nand_device *nand = spinand_to_nand(spinand);
1161 spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
1162 sizeof(*spinand->dirmaps) *
1165 if (!spinand->dirmaps)
1169 ret = spinand_create_dirmap(spinand, i);
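Each plane gets a read/write descriptor pair, plus a raw ECC-disabled pair when those differ. The shape of one mapping, shown as a fragment with variables as in spinand_create_dirmap() above:

struct spi_mem_dirmap_info info = {
	.op_tmpl = *spinand->op_templates.read_cache,
	/* One page plus its OOB area per mapping window. */
	.length = nanddev_page_size(nand) +
		  nanddev_per_page_oobsize(nand),
};
struct spi_mem_dirmap_desc *desc;

desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
				  spinand->spimem, &info);
if (IS_ERR(desc))
	return PTR_ERR(desc);
spinand->dirmaps[plane].rdesc = desc;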
1198 static int spinand_manufacturer_match(struct spinand_device *spinand,
1201 u8 *id = spinand->id.data;
1212 ret = spinand_match_and_init(spinand,
1219 spinand->manufacturer = manufacturer;
1225 static int spinand_id_detect(struct spinand_device *spinand)
1227 u8 *id = spinand->id.data;
1230 ret = spinand_read_id_op(spinand, 0, 0, id);
1233 ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
1237 ret = spinand_read_id_op(spinand, 1, 0, id);
1240 ret = spinand_manufacturer_match(spinand,
1245 ret = spinand_read_id_op(spinand, 0, 1, id);
1248 ret = spinand_manufacturer_match(spinand,
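The three probes above walk the READ ID conventions defined in linux/mtd/spinand.h: bare opcode, opcode plus one address byte, and opcode plus one dummy byte. A vendor table entry records which convention its ID bytes follow, e.g. (ID bytes hypothetical):

SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc2, 0x12)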
1254 static int spinand_manufacturer_init(struct spinand_device *spinand)
1258 if (spinand->manufacturer->ops->init) {
1259 ret = spinand->manufacturer->ops->init(spinand);
1264 if (spinand->configure_chip) {
1265 ret = spinand->configure_chip(spinand);
1273 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
1276 if (spinand->manufacturer->ops->cleanup)
1277 return spinand->manufacturer->ops->cleanup(spinand);
1281 spinand_select_op_variant(struct spinand_device *spinand,
1284 struct nand_device *nand = spinand_to_nand(spinand);
1300 ret = spi_mem_adjust_op_size(spinand->spimem, &op);
1304 spi_mem_adjust_op_freq(spinand->spimem, &op);
1306 if (!spi_mem_supports_op(spinand->spimem, &op))
1311 op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
1326 * @spinand: SPI NAND object
1332 * entry in the SPI NAND description table. If a match is found, the spinand
1338 int spinand_match_and_init(struct spinand_device *spinand,
1343 u8 *id = spinand->id.data;
1344 struct nand_device *nand = spinand_to_nand(spinand);
1359 spinand->eccinfo = table[i].eccinfo;
1360 spinand->flags = table[i].flags;
1361 spinand->id.len = 1 + table[i].devid.len;
1362 spinand->select_target = table[i].select_target;
1363 spinand->configure_chip = table[i].configure_chip;
1364 spinand->set_cont_read = table[i].set_cont_read;
1365 spinand->fact_otp = &table[i].fact_otp;
1366 spinand->user_otp = &table[i].user_otp;
1367 spinand->read_retries = table[i].read_retries;
1368 spinand->set_read_retry = table[i].set_read_retry;
1370 op = spinand_select_op_variant(spinand,
1375 spinand->op_templates.read_cache = op;
1377 op = spinand_select_op_variant(spinand,
1382 spinand->op_templates.write_cache = op;
1384 op = spinand_select_op_variant(spinand,
1386 spinand->op_templates.update_cache = op;
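For reference, the description-table entries that spinand_match_and_init() consumes are built with the SPINAND_INFO() family of macros. A hypothetical entry (the *_cache_variants lists are assumed to be SPINAND_OP_VARIANTS() definitions nearby; every value here is illustrative, not a real part):

static const struct spinand_info example_spinand_table[] = {
	SPINAND_INFO("EXAMPLE-1G",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xab),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&example_ooblayout, NULL)),
};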
1394 static int spinand_detect(struct spinand_device *spinand)
1396 struct device *dev = &spinand->spimem->spi->dev;
1397 struct nand_device *nand = spinand_to_nand(spinand);
1400 ret = spinand_reset_op(spinand);
1404 ret = spinand_id_detect(spinand);
1407 spinand->id.data);
1411 if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1417 dev_info(&spinand->spimem->spi->dev,
1418 "%s SPI NAND was found.\n", spinand->manufacturer->name);
1419 dev_info(&spinand->spimem->spi->dev,
1427 static int spinand_init_flash(struct spinand_device *spinand)
1429 struct device *dev = &spinand->spimem->spi->dev;
1430 struct nand_device *nand = spinand_to_nand(spinand);
1433 ret = spinand_read_cfg(spinand);
1437 ret = spinand_init_quad_enable(spinand);
1441 ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1445 ret = spinand_manufacturer_init(spinand);
1455 ret = spinand_select_target(spinand, i);
1459 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1465 spinand_manufacturer_cleanup(spinand);
1472 struct spinand_device *spinand = mtd_to_spinand(mtd);
1475 ret = spinand_reset_op(spinand);
1479 ret = spinand_init_flash(spinand);
1483 spinand_ecc_enable(spinand, false);
1486 static int spinand_init(struct spinand_device *spinand)
1488 struct device *dev = &spinand->spimem->spi->dev;
1489 struct mtd_info *mtd = spinand_to_mtd(spinand);
1497 spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1498 if (!spinand->scratchbuf)
1501 ret = spinand_detect(spinand);
1510 spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
1512 if (!spinand->databuf) {
1517 spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1519 ret = spinand_init_cfg_cache(spinand);
1523 ret = spinand_init_flash(spinand);
1535 spinand_ecc_enable(spinand, false);
1544 spinand_cont_read_init(spinand);
1555 if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
1556 ret = spinand_set_mtd_otp_ops(spinand);
1574 ret = spinand_create_dirmaps(spinand);
1591 spinand_manufacturer_cleanup(spinand);
1594 kfree(spinand->databuf);
1595 kfree(spinand->scratchbuf);
1599 static void spinand_cleanup(struct spinand_device *spinand)
1601 struct nand_device *nand = spinand_to_nand(spinand);
1605 spinand_manufacturer_cleanup(spinand);
1606 kfree(spinand->databuf);
1607 kfree(spinand->scratchbuf);
1612 struct spinand_device *spinand;
1616 spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1618 if (!spinand)
1621 spinand->spimem = mem;
1622 spi_mem_set_drvdata(mem, spinand);
1623 spinand_set_of_node(spinand, mem->spi->dev.of_node);
1624 mutex_init(&spinand->lock);
1625 mtd = spinand_to_mtd(spinand);
1628 ret = spinand_init(spinand);
1639 spinand_cleanup(spinand);
1646 struct spinand_device *spinand;
1650 spinand = spi_mem_get_drvdata(mem);
1651 mtd = spinand_to_mtd(spinand);
1657 spinand_cleanup(spinand);
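The probe/remove pair above is hooked into the spi-mem layer. A sketch of the usual registration boilerplate, assuming the mainline callback names spinand_spi_mem_probe()/spinand_spi_mem_remove() (id tables trimmed):

static struct spi_mem_driver example_spinand_drv = {
	.spidrv = {
		.driver = {
			.name = "spi-nand",
		},
	},
	.probe = spinand_spi_mem_probe,
	.remove = spinand_spi_mem_remove,
};
module_spi_mem_driver(example_spinand_drv);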