Lines Matching +full:512 +full:- +full:bytes
1 // SPDX-License-Identifier: GPL-2.0-or-later
11 * - Read: Auto Decode
12 * - Write: Auto Encode
13 * - Tested Page Sizes: 2048, 4096
32 #include <linux/dma-mapping.h>
134 if (section >= nand_chip->ecc.steps) in lpc32xx_ooblayout_ecc()
135 return -ERANGE; in lpc32xx_ooblayout_ecc()
137 oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes; in lpc32xx_ooblayout_ecc()
138 oobregion->length = nand_chip->ecc.bytes; in lpc32xx_ooblayout_ecc()
148 if (section >= nand_chip->ecc.steps) in lpc32xx_ooblayout_free()
149 return -ERANGE; in lpc32xx_ooblayout_free()
151 oobregion->offset = 16 * section; in lpc32xx_ooblayout_free()
152 oobregion->length = 16 - nand_chip->ecc.bytes; in lpc32xx_ooblayout_free()
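A standalone sketch (not driver code) of what these two callbacks yield, assuming the ecc.bytes = 10 and 512-byte ECC step configured further down in lpc32xx_nand_attach_chip(): each 512-byte subpage owns a 16-byte OOB slice, split into 6 free bytes followed by 10 ECC bytes.

/* Standalone sketch: prints the OOB layout the two callbacks above
 * produce for a 2048-byte page with ecc.bytes = 10. */
#include <stdio.h>

int main(void)
{
	const int ecc_bytes = 10;
	const int steps = 2048 / 512;	/* four 512-byte subpages */
	int s;

	for (s = 0; s < steps; s++)
		printf("section %d: free %2d..%2d, ECC %2d..%2d\n", s,
		       16 * s, 16 * s + (16 - ecc_bytes) - 1,
		       (s + 1) * 16 - ecc_bytes, (s + 1) * 16 - 1);
	return 0;
}

For a 2048-byte page this prints four sections: free bytes 0..5/16..21/32..37/48..53 and ECC bytes 6..15/22..31/38..47/54..63, filling the 64-byte spare area exactly.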
203 int mlcsubpages; /* number of 512-byte subpages */
209 * Using the PL080 DMA Controller for transferring the 512-byte subpages
211 * Measurements via getnstimeofday() upon 512-byte subpage reads reveal:
213 * - readl() of 128 x 32 bits in a loop: ~20us
214 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
215 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
220 * Note that the 512-byte subpage transfer is done directly from/to a
221 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
222 * 2048-byte page) is spent waiting for the NAND IRQ, anyway. (The NAND
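The numbers above were gathered with getnstimeofday(), which has since been removed from mainline; a minimal sketch of how the PIO case could be re-measured today with ktime_get_ns(), written as a fragment against this driver's own MLC_BUFF()/host->io_base accessors (illustrative only, not part of the driver):

	/* Sketch only: time one 512-byte PIO drain of the controller FIFO. */
	u64 t0, t1;
	int k;

	t0 = ktime_get_ns();
	for (k = 0; k < 512 / 4; k++)		/* 128 x 32-bit reads */
		readl(MLC_BUFF(host->io_base));
	t1 = ktime_get_ns();
	dev_dbg(&mtd->dev, "512-byte PIO read took %llu ns\n", t1 - t0);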
236 writel(MLCCMD_RESET, MLC_CMD(host->io_base)); in lpc32xx_nand_setup()
240 clkrate = clk_get_rate(host->clk); in lpc32xx_nand_setup()
246 writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); in lpc32xx_nand_setup()
250 writel(tmp, MLC_ICR(host->io_base)); in lpc32xx_nand_setup()
254 writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); in lpc32xx_nand_setup()
258 tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1); in lpc32xx_nand_setup()
259 tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1); in lpc32xx_nand_setup()
260 tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1); in lpc32xx_nand_setup()
261 tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1); in lpc32xx_nand_setup()
262 tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low); in lpc32xx_nand_setup()
263 tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1); in lpc32xx_nand_setup()
264 tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low); in lpc32xx_nand_setup()
265 writel(tmp, MLC_TIME_REG(host->io_base)); in lpc32xx_nand_setup()
269 MLC_IRQ_MR(host->io_base)); in lpc32xx_nand_setup()
272 writel(MLCCEH_NORMAL, MLC_CEH(host->io_base)); in lpc32xx_nand_setup()
285 writel(cmd, MLC_CMD(host->io_base)); in lpc32xx_nand_cmd_ctrl()
287 writel(cmd, MLC_ADDR(host->io_base)); in lpc32xx_nand_cmd_ctrl()
298 if ((readb(MLC_ISR(host->io_base)) & in lpc32xx_nand_device_ready()
311 sr = readb(MLC_IRQ_SR(host->io_base)); in lpc3xxx_nand_irq()
313 complete(&host->comp_nand); in lpc3xxx_nand_irq()
315 complete(&host->comp_controller); in lpc3xxx_nand_irq()
325 if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY) in lpc32xx_waitfunc_nand()
328 wait_for_completion(&host->comp_nand); in lpc32xx_waitfunc_nand()
330 while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) { in lpc32xx_waitfunc_nand()
332 dev_dbg(&mtd->dev, "Warning: NAND not ready.\n"); in lpc32xx_waitfunc_nand()
345 if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY) in lpc32xx_waitfunc_controller()
348 wait_for_completion(&host->comp_controller); in lpc32xx_waitfunc_controller()
350 while (!(readb(MLC_ISR(host->io_base)) & in lpc32xx_waitfunc_controller()
352 dev_dbg(&mtd->dev, "Warning: Controller not ready.\n"); in lpc32xx_waitfunc_controller()
373 if (host->wp_gpio) in lpc32xx_wp_enable()
374 gpiod_set_value_cansleep(host->wp_gpio, 1); in lpc32xx_wp_enable()
382 if (host->wp_gpio) in lpc32xx_wp_disable()
383 gpiod_set_value_cansleep(host->wp_gpio, 0); in lpc32xx_wp_disable()
400 sg_init_one(&host->sgl, mem, len); in lpc32xx_xmit_dma()
402 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1, in lpc32xx_xmit_dma()
405 dev_err(mtd->dev.parent, "Failed to map sg list\n"); in lpc32xx_xmit_dma()
406 return -ENXIO; in lpc32xx_xmit_dma()
408 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir, in lpc32xx_xmit_dma()
411 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n"); in lpc32xx_xmit_dma()
415 init_completion(&host->comp_dma); in lpc32xx_xmit_dma()
416 desc->callback = lpc32xx_dma_complete_func; in lpc32xx_xmit_dma()
417 desc->callback_param = &host->comp_dma; in lpc32xx_xmit_dma()
420 dma_async_issue_pending(host->dma_chan); in lpc32xx_xmit_dma()
422 wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000)); in lpc32xx_xmit_dma()
424 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, in lpc32xx_xmit_dma()
428 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, in lpc32xx_xmit_dma()
430 return -ENXIO; in lpc32xx_xmit_dma()
439 uint8_t *oobbuf = chip->oob_poi; in lpc32xx_read_page()
449 dma_buf = host->dma_buf; in lpc32xx_read_page()
456 /* For all sub-pages */ in lpc32xx_read_page()
457 for (i = 0; i < host->mlcsubpages; i++) { in lpc32xx_read_page()
459 writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base)); in lpc32xx_read_page()
465 mlc_isr = readl(MLC_ISR(host->io_base)); in lpc32xx_read_page()
467 mtd->ecc_stats.failed++; in lpc32xx_read_page()
468 dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__); in lpc32xx_read_page()
470 mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1; in lpc32xx_read_page()
473 /* Read 512 + 16 Bytes */ in lpc32xx_read_page()
475 res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512, in lpc32xx_read_page()
480 for (j = 0; j < (512 >> 2); j++) { in lpc32xx_read_page()
482 readl(MLC_BUFF(host->io_base)); in lpc32xx_read_page()
488 readl(MLC_BUFF(host->io_base)); in lpc32xx_read_page()
494 memcpy(buf, dma_buf, mtd->writesize); in lpc32xx_read_page()
505 const uint8_t *oobbuf = chip->oob_poi; in lpc32xx_write_page_lowlevel()
511 dma_buf = host->dma_buf; in lpc32xx_write_page_lowlevel()
512 memcpy(dma_buf, buf, mtd->writesize); in lpc32xx_write_page_lowlevel()
517 for (i = 0; i < host->mlcsubpages; i++) { in lpc32xx_write_page_lowlevel()
519 writeb(0x00, MLC_ECC_ENC_REG(host->io_base)); in lpc32xx_write_page_lowlevel()
521 /* Write 512 + 6 Bytes to Buffer */ in lpc32xx_write_page_lowlevel()
523 res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512, in lpc32xx_write_page_lowlevel()
528 for (j = 0; j < (512 >> 2); j++) { in lpc32xx_write_page_lowlevel()
530 MLC_BUFF(host->io_base)); in lpc32xx_write_page_lowlevel()
534 writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base)); in lpc32xx_write_page_lowlevel()
536 writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base)); in lpc32xx_write_page_lowlevel()
540 writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base)); in lpc32xx_write_page_lowlevel()
553 /* Read whole page - necessary with MLC controller! */ in lpc32xx_read_oob()
554 lpc32xx_read_page(chip, host->dummy_buf, 1, page); in lpc32xx_read_oob()
573 struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); in lpc32xx_dma_setup()
576 if (!host->pdata || !host->pdata->dma_filter) { in lpc32xx_dma_setup()
577 dev_err(mtd->dev.parent, "no DMA platform data\n"); in lpc32xx_dma_setup()
578 return -ENOENT; in lpc32xx_dma_setup()
583 host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, in lpc32xx_dma_setup()
584 "nand-mlc"); in lpc32xx_dma_setup()
585 if (!host->dma_chan) { in lpc32xx_dma_setup()
586 dev_err(mtd->dev.parent, "Failed to request DMA channel\n"); in lpc32xx_dma_setup()
587 return -EBUSY; in lpc32xx_dma_setup()
592 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x in lpc32xx_dma_setup()
595 host->dma_slave_config.direction = DMA_DEV_TO_MEM; in lpc32xx_dma_setup()
596 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; in lpc32xx_dma_setup()
597 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; in lpc32xx_dma_setup()
598 host->dma_slave_config.src_maxburst = 128; in lpc32xx_dma_setup()
599 host->dma_slave_config.dst_maxburst = 128; in lpc32xx_dma_setup()
601 host->dma_slave_config.device_fc = false; in lpc32xx_dma_setup()
602 host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy); in lpc32xx_dma_setup()
603 host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy); in lpc32xx_dma_setup()
604 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) { in lpc32xx_dma_setup()
605 dev_err(mtd->dev.parent, "Failed to setup DMA slave\n"); in lpc32xx_dma_setup()
611 dma_release_channel(host->dma_chan); in lpc32xx_dma_setup()
612 return -ENXIO; in lpc32xx_dma_setup()
618 struct device_node *np = dev->of_node; in lpc32xx_parse_dt()
624 of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay); in lpc32xx_parse_dt()
625 of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay); in lpc32xx_parse_dt()
626 of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta); in lpc32xx_parse_dt()
627 of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high); in lpc32xx_parse_dt()
628 of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low); in lpc32xx_parse_dt()
629 of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high); in lpc32xx_parse_dt()
630 of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low); in lpc32xx_parse_dt()
632 if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta || in lpc32xx_parse_dt()
633 !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high || in lpc32xx_parse_dt()
634 !ncfg->wr_low) { in lpc32xx_parse_dt()
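For orientation, an illustrative devicetree node exercising the properties parsed above; the compatible string and property names come from this file, while the unit address, interrupt specifier and timing values are placeholders rather than values from a real board. Note that the setup code at lines 258-264 divides the controller clock rate by each timing property, so the values act as frequencies (inverse durations), not nanoseconds.

	flash@200a8000 {
		compatible = "nxp,lpc3220-mlc";
		reg = <0x200a8000 0x11000>;	/* placeholder */
		interrupts = <5 0>;		/* placeholder */
		nxp,tcea-delay = <333333333>;	/* illustrative timing values */
		nxp,busy-delay = <10000000>;
		nxp,nand-ta = <18181818>;
		nxp,rd-high = <31250000>;
		nxp,rd-low = <45454545>;
		nxp,wr-high = <40000000>;
		nxp,wr-low = <83333333>;
	};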
646 struct device *dev = &host->pdev->dev; in lpc32xx_nand_attach_chip()
648 if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) in lpc32xx_nand_attach_chip()
651 host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL); in lpc32xx_nand_attach_chip()
652 if (!host->dma_buf) in lpc32xx_nand_attach_chip()
653 return -ENOMEM; in lpc32xx_nand_attach_chip()
655 host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL); in lpc32xx_nand_attach_chip()
656 if (!host->dummy_buf) in lpc32xx_nand_attach_chip()
657 return -ENOMEM; in lpc32xx_nand_attach_chip()
659 chip->ecc.size = 512; in lpc32xx_nand_attach_chip()
660 chip->ecc.hwctl = lpc32xx_ecc_enable; in lpc32xx_nand_attach_chip()
661 chip->ecc.read_page_raw = lpc32xx_read_page; in lpc32xx_nand_attach_chip()
662 chip->ecc.read_page = lpc32xx_read_page; in lpc32xx_nand_attach_chip()
663 chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel; in lpc32xx_nand_attach_chip()
664 chip->ecc.write_page = lpc32xx_write_page_lowlevel; in lpc32xx_nand_attach_chip()
665 chip->ecc.write_oob = lpc32xx_write_oob; in lpc32xx_nand_attach_chip()
666 chip->ecc.read_oob = lpc32xx_read_oob; in lpc32xx_nand_attach_chip()
667 chip->ecc.strength = 4; in lpc32xx_nand_attach_chip()
668 chip->ecc.bytes = 10; in lpc32xx_nand_attach_chip()
671 host->mlcsubpages = mtd->writesize / 512; in lpc32xx_nand_attach_chip()
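Taking the 2048-byte page size listed as tested at the top of the file, these ECC settings work out to the following totals (worked numbers under that assumption, not driver output):

	/*
	 * mlcsubpages    = 2048 / 512    = 4
	 * ECC bytes used = 4 * 10        = 40
	 * free OOB bytes = 4 * (16 - 10) = 24
	 * total OOB      = 40 + 24       = 64, i.e. the whole spare area
	 *                                  of a typical 2048+64 page
	 */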
692 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); in lpc32xx_nand_probe()
694 return -ENOMEM; in lpc32xx_nand_probe()
696 host->pdev = pdev; in lpc32xx_nand_probe()
698 host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc); in lpc32xx_nand_probe()
699 if (IS_ERR(host->io_base)) in lpc32xx_nand_probe()
700 return PTR_ERR(host->io_base); in lpc32xx_nand_probe()
702 host->io_base_phy = rc->start; in lpc32xx_nand_probe()
704 nand_chip = &host->nand_chip; in lpc32xx_nand_probe()
706 if (pdev->dev.of_node) in lpc32xx_nand_probe()
707 host->ncfg = lpc32xx_parse_dt(&pdev->dev); in lpc32xx_nand_probe()
708 if (!host->ncfg) { in lpc32xx_nand_probe()
709 dev_err(&pdev->dev, in lpc32xx_nand_probe()
711 return -ENOENT; in lpc32xx_nand_probe()
715 host->wp_gpio = gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW); in lpc32xx_nand_probe()
716 res = PTR_ERR_OR_ZERO(host->wp_gpio); in lpc32xx_nand_probe()
718 if (res != -EPROBE_DEFER) in lpc32xx_nand_probe()
719 dev_err(&pdev->dev, "WP GPIO is not available: %d\n", in lpc32xx_nand_probe()
724 gpiod_set_consumer_name(host->wp_gpio, "NAND WP"); in lpc32xx_nand_probe()
726 host->pdata = dev_get_platdata(&pdev->dev); in lpc32xx_nand_probe()
730 nand_set_flash_node(nand_chip, pdev->dev.of_node); in lpc32xx_nand_probe()
731 mtd->dev.parent = &pdev->dev; in lpc32xx_nand_probe()
734 host->clk = clk_get(&pdev->dev, NULL); in lpc32xx_nand_probe()
735 if (IS_ERR(host->clk)) { in lpc32xx_nand_probe()
736 dev_err(&pdev->dev, "Clock initialization failure\n"); in lpc32xx_nand_probe()
737 res = -ENOENT; in lpc32xx_nand_probe()
740 res = clk_prepare_enable(host->clk); in lpc32xx_nand_probe()
744 nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl; in lpc32xx_nand_probe()
745 nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready; in lpc32xx_nand_probe()
746 nand_chip->legacy.chip_delay = 25; /* us */ in lpc32xx_nand_probe()
747 nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base); in lpc32xx_nand_probe()
748 nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base); in lpc32xx_nand_probe()
756 nand_chip->legacy.waitfunc = lpc32xx_waitfunc; in lpc32xx_nand_probe()
758 nand_chip->options = NAND_NO_SUBPAGE_WRITE; in lpc32xx_nand_probe()
759 nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; in lpc32xx_nand_probe()
760 nand_chip->bbt_td = &lpc32xx_nand_bbt; in lpc32xx_nand_probe()
761 nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror; in lpc32xx_nand_probe()
766 res = -EIO; in lpc32xx_nand_probe()
772 readb(MLC_IRQ_SR(host->io_base)); in lpc32xx_nand_probe()
774 init_completion(&host->comp_nand); in lpc32xx_nand_probe()
775 init_completion(&host->comp_controller); in lpc32xx_nand_probe()
777 host->irq = platform_get_irq(pdev, 0); in lpc32xx_nand_probe()
778 if (host->irq < 0) { in lpc32xx_nand_probe()
779 res = -EINVAL; in lpc32xx_nand_probe()
783 if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq, in lpc32xx_nand_probe()
785 dev_err(&pdev->dev, "Error requesting NAND IRQ\n"); in lpc32xx_nand_probe()
786 res = -ENXIO; in lpc32xx_nand_probe()
794 nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops; in lpc32xx_nand_probe()
799 mtd->name = DRV_NAME; in lpc32xx_nand_probe()
801 res = mtd_device_register(mtd, host->ncfg->parts, in lpc32xx_nand_probe()
802 host->ncfg->num_parts); in lpc32xx_nand_probe()
811 free_irq(host->irq, host); in lpc32xx_nand_probe()
814 dma_release_channel(host->dma_chan); in lpc32xx_nand_probe()
816 clk_disable_unprepare(host->clk); in lpc32xx_nand_probe()
818 clk_put(host->clk); in lpc32xx_nand_probe()
821 gpiod_put(host->wp_gpio); in lpc32xx_nand_probe()
832 struct nand_chip *chip = &host->nand_chip; in lpc32xx_nand_remove()
839 free_irq(host->irq, host); in lpc32xx_nand_remove()
841 dma_release_channel(host->dma_chan); in lpc32xx_nand_remove()
843 clk_disable_unprepare(host->clk); in lpc32xx_nand_remove()
844 clk_put(host->clk); in lpc32xx_nand_remove()
847 gpiod_put(host->wp_gpio); in lpc32xx_nand_remove()
855 /* Re-enable NAND clock */ in lpc32xx_nand_resume()
856 ret = clk_prepare_enable(host->clk); in lpc32xx_nand_resume()
877 clk_disable_unprepare(host->clk); in lpc32xx_nand_suspend()
882 { .compatible = "nxp,lpc3220-mlc" },