19c92ab61SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2c76b78d8SArchit Taneja /* 3c76b78d8SArchit Taneja * Copyright (c) 2016, The Linux Foundation. All rights reserved. 4c76b78d8SArchit Taneja */ 5c76b78d8SArchit Taneja 6c76b78d8SArchit Taneja #include <linux/clk.h> 7c76b78d8SArchit Taneja #include <linux/slab.h> 8c76b78d8SArchit Taneja #include <linux/bitops.h> 9c76b78d8SArchit Taneja #include <linux/dma-mapping.h> 10c76b78d8SArchit Taneja #include <linux/dmaengine.h> 11c76b78d8SArchit Taneja #include <linux/module.h> 12d4092d76SBoris Brezillon #include <linux/mtd/rawnand.h> 13c76b78d8SArchit Taneja #include <linux/mtd/partitions.h> 14c76b78d8SArchit Taneja #include <linux/of.h> 15c76b78d8SArchit Taneja #include <linux/of_device.h> 16c76b78d8SArchit Taneja #include <linux/delay.h> 178c4cdce8SAbhishek Sahu #include <linux/dma/qcom_bam_dma.h> 18c76b78d8SArchit Taneja 19c76b78d8SArchit Taneja /* NANDc reg offsets */ 20c76b78d8SArchit Taneja #define NAND_FLASH_CMD 0x00 21c76b78d8SArchit Taneja #define NAND_ADDR0 0x04 22c76b78d8SArchit Taneja #define NAND_ADDR1 0x08 23c76b78d8SArchit Taneja #define NAND_FLASH_CHIP_SELECT 0x0c 24c76b78d8SArchit Taneja #define NAND_EXEC_CMD 0x10 25c76b78d8SArchit Taneja #define NAND_FLASH_STATUS 0x14 26c76b78d8SArchit Taneja #define NAND_BUFFER_STATUS 0x18 27c76b78d8SArchit Taneja #define NAND_DEV0_CFG0 0x20 28c76b78d8SArchit Taneja #define NAND_DEV0_CFG1 0x24 29c76b78d8SArchit Taneja #define NAND_DEV0_ECC_CFG 0x28 300646493eSMd Sadre Alam #define NAND_AUTO_STATUS_EN 0x2c 31c76b78d8SArchit Taneja #define NAND_DEV1_CFG0 0x30 32c76b78d8SArchit Taneja #define NAND_DEV1_CFG1 0x34 33c76b78d8SArchit Taneja #define NAND_READ_ID 0x40 34c76b78d8SArchit Taneja #define NAND_READ_STATUS 0x44 35c76b78d8SArchit Taneja #define NAND_DEV_CMD0 0xa0 36c76b78d8SArchit Taneja #define NAND_DEV_CMD1 0xa4 37c76b78d8SArchit Taneja #define NAND_DEV_CMD2 0xa8 38c76b78d8SArchit Taneja #define NAND_DEV_CMD_VLD 0xac 39c76b78d8SArchit Taneja #define SFLASHC_BURST_CFG 0xe0 40c76b78d8SArchit Taneja #define NAND_ERASED_CW_DETECT_CFG 0xe8 41c76b78d8SArchit Taneja #define NAND_ERASED_CW_DETECT_STATUS 0xec 42c76b78d8SArchit Taneja #define NAND_EBI2_ECC_BUF_CFG 0xf0 43c76b78d8SArchit Taneja #define FLASH_BUF_ACC 0x100 44c76b78d8SArchit Taneja 45c76b78d8SArchit Taneja #define NAND_CTRL 0xf00 46c76b78d8SArchit Taneja #define NAND_VERSION 0xf08 47c76b78d8SArchit Taneja #define NAND_READ_LOCATION_0 0xf20 48c76b78d8SArchit Taneja #define NAND_READ_LOCATION_1 0xf24 4991af95c1SAbhishek Sahu #define NAND_READ_LOCATION_2 0xf28 5091af95c1SAbhishek Sahu #define NAND_READ_LOCATION_3 0xf2c 51503ee5aaSMd Sadre Alam #define NAND_READ_LOCATION_LAST_CW_0 0xf40 52503ee5aaSMd Sadre Alam #define NAND_READ_LOCATION_LAST_CW_1 0xf44 53503ee5aaSMd Sadre Alam #define NAND_READ_LOCATION_LAST_CW_2 0xf48 54503ee5aaSMd Sadre Alam #define NAND_READ_LOCATION_LAST_CW_3 0xf4c 55c76b78d8SArchit Taneja 56c76b78d8SArchit Taneja /* dummy register offsets, used by write_reg_dma */ 57c76b78d8SArchit Taneja #define NAND_DEV_CMD1_RESTORE 0xdead 58c76b78d8SArchit Taneja #define NAND_DEV_CMD_VLD_RESTORE 0xbeef 59c76b78d8SArchit Taneja 60c76b78d8SArchit Taneja /* NAND_FLASH_CMD bits */ 61c76b78d8SArchit Taneja #define PAGE_ACC BIT(4) 62c76b78d8SArchit Taneja #define LAST_PAGE BIT(5) 63c76b78d8SArchit Taneja 64c76b78d8SArchit Taneja /* NAND_FLASH_CHIP_SELECT bits */ 65c76b78d8SArchit Taneja #define NAND_DEV_SEL 0 66c76b78d8SArchit Taneja #define DM_EN BIT(2) 67c76b78d8SArchit Taneja 68c76b78d8SArchit Taneja /* NAND_FLASH_STATUS 
bits */ 69c76b78d8SArchit Taneja #define FS_OP_ERR BIT(4) 70c76b78d8SArchit Taneja #define FS_READY_BSY_N BIT(5) 71c76b78d8SArchit Taneja #define FS_MPU_ERR BIT(8) 72c76b78d8SArchit Taneja #define FS_DEVICE_STS_ERR BIT(16) 73c76b78d8SArchit Taneja #define FS_DEVICE_WP BIT(23) 74c76b78d8SArchit Taneja 75c76b78d8SArchit Taneja /* NAND_BUFFER_STATUS bits */ 76c76b78d8SArchit Taneja #define BS_UNCORRECTABLE_BIT BIT(8) 77c76b78d8SArchit Taneja #define BS_CORRECTABLE_ERR_MSK 0x1f 78c76b78d8SArchit Taneja 79c76b78d8SArchit Taneja /* NAND_DEVn_CFG0 bits */ 80c76b78d8SArchit Taneja #define DISABLE_STATUS_AFTER_WRITE 4 81c76b78d8SArchit Taneja #define CW_PER_PAGE 6 82c76b78d8SArchit Taneja #define UD_SIZE_BYTES 9 83c76b78d8SArchit Taneja #define ECC_PARITY_SIZE_BYTES_RS 19 84c76b78d8SArchit Taneja #define SPARE_SIZE_BYTES 23 85c76b78d8SArchit Taneja #define NUM_ADDR_CYCLES 27 86c76b78d8SArchit Taneja #define STATUS_BFR_READ 30 87c76b78d8SArchit Taneja #define SET_RD_MODE_AFTER_STATUS 31 88c76b78d8SArchit Taneja 89c76b78d8SArchit Taneja /* NAND_DEVn_CFG1 bits */ 90c76b78d8SArchit Taneja #define DEV0_CFG1_ECC_DISABLE 0 91c76b78d8SArchit Taneja #define WIDE_FLASH 1 92c76b78d8SArchit Taneja #define NAND_RECOVERY_CYCLES 2 93c76b78d8SArchit Taneja #define CS_ACTIVE_BSY 5 94c76b78d8SArchit Taneja #define BAD_BLOCK_BYTE_NUM 6 95c76b78d8SArchit Taneja #define BAD_BLOCK_IN_SPARE_AREA 16 96c76b78d8SArchit Taneja #define WR_RD_BSY_GAP 17 97c76b78d8SArchit Taneja #define ENABLE_BCH_ECC 27 98c76b78d8SArchit Taneja 99c76b78d8SArchit Taneja /* NAND_DEV0_ECC_CFG bits */ 100c76b78d8SArchit Taneja #define ECC_CFG_ECC_DISABLE 0 101c76b78d8SArchit Taneja #define ECC_SW_RESET 1 102c76b78d8SArchit Taneja #define ECC_MODE 4 103c76b78d8SArchit Taneja #define ECC_PARITY_SIZE_BYTES_BCH 8 104c76b78d8SArchit Taneja #define ECC_NUM_DATA_BYTES 16 105c76b78d8SArchit Taneja #define ECC_FORCE_CLK_OPEN 30 106c76b78d8SArchit Taneja 107c76b78d8SArchit Taneja /* NAND_DEV_CMD1 bits */ 108c76b78d8SArchit Taneja #define READ_ADDR 0 109c76b78d8SArchit Taneja 110c76b78d8SArchit Taneja /* NAND_DEV_CMD_VLD bits */ 111d8a9b320SAbhishek Sahu #define READ_START_VLD BIT(0) 112d8a9b320SAbhishek Sahu #define READ_STOP_VLD BIT(1) 113d8a9b320SAbhishek Sahu #define WRITE_START_VLD BIT(2) 114d8a9b320SAbhishek Sahu #define ERASE_START_VLD BIT(3) 115d8a9b320SAbhishek Sahu #define SEQ_READ_START_VLD BIT(4) 116c76b78d8SArchit Taneja 117c76b78d8SArchit Taneja /* NAND_EBI2_ECC_BUF_CFG bits */ 118c76b78d8SArchit Taneja #define NUM_STEPS 0 119c76b78d8SArchit Taneja 120c76b78d8SArchit Taneja /* NAND_ERASED_CW_DETECT_CFG bits */ 121c76b78d8SArchit Taneja #define ERASED_CW_ECC_MASK 1 122c76b78d8SArchit Taneja #define AUTO_DETECT_RES 0 123c76b78d8SArchit Taneja #define MASK_ECC (1 << ERASED_CW_ECC_MASK) 124c76b78d8SArchit Taneja #define RESET_ERASED_DET (1 << AUTO_DETECT_RES) 125c76b78d8SArchit Taneja #define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES) 126c76b78d8SArchit Taneja #define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC) 127c76b78d8SArchit Taneja #define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC) 128c76b78d8SArchit Taneja 129c76b78d8SArchit Taneja /* NAND_ERASED_CW_DETECT_STATUS bits */ 130c76b78d8SArchit Taneja #define PAGE_ALL_ERASED BIT(7) 131c76b78d8SArchit Taneja #define CODEWORD_ALL_ERASED BIT(6) 132c76b78d8SArchit Taneja #define PAGE_ERASED BIT(5) 133c76b78d8SArchit Taneja #define CODEWORD_ERASED BIT(4) 134c76b78d8SArchit Taneja #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED) 135c76b78d8SArchit Taneja #define ERASED_CW
(CODEWORD_ALL_ERASED | CODEWORD_ERASED) 136c76b78d8SArchit Taneja 13791af95c1SAbhishek Sahu /* NAND_READ_LOCATION_n bits */ 13891af95c1SAbhishek Sahu #define READ_LOCATION_OFFSET 0 13991af95c1SAbhishek Sahu #define READ_LOCATION_SIZE 16 14091af95c1SAbhishek Sahu #define READ_LOCATION_LAST 31 14191af95c1SAbhishek Sahu 142c76b78d8SArchit Taneja /* Version Mask */ 143c76b78d8SArchit Taneja #define NAND_VERSION_MAJOR_MASK 0xf0000000 144c76b78d8SArchit Taneja #define NAND_VERSION_MAJOR_SHIFT 28 145c76b78d8SArchit Taneja #define NAND_VERSION_MINOR_MASK 0x0fff0000 146c76b78d8SArchit Taneja #define NAND_VERSION_MINOR_SHIFT 16 147c76b78d8SArchit Taneja 148c76b78d8SArchit Taneja /* NAND OP_CMDs */ 14933bf5519SOlof Johansson #define OP_PAGE_READ 0x2 15033bf5519SOlof Johansson #define OP_PAGE_READ_WITH_ECC 0x3 15133bf5519SOlof Johansson #define OP_PAGE_READ_WITH_ECC_SPARE 0x4 152b1209582SManivannan Sadhasivam #define OP_PAGE_READ_ONFI_READ 0x5 15333bf5519SOlof Johansson #define OP_PROGRAM_PAGE 0x6 15433bf5519SOlof Johansson #define OP_PAGE_PROGRAM_WITH_ECC 0x7 15533bf5519SOlof Johansson #define OP_PROGRAM_PAGE_SPARE 0x9 15633bf5519SOlof Johansson #define OP_BLOCK_ERASE 0xa 15733bf5519SOlof Johansson #define OP_FETCH_ID 0xb 15833bf5519SOlof Johansson #define OP_RESET_DEVICE 0xd 159c76b78d8SArchit Taneja 160d8a9b320SAbhishek Sahu /* Default Value for NAND_DEV_CMD_VLD */ 161d8a9b320SAbhishek Sahu #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ 162d8a9b320SAbhishek Sahu ERASE_START_VLD | SEQ_READ_START_VLD) 163d8a9b320SAbhishek Sahu 1649d43f915SAbhishek Sahu /* NAND_CTRL bits */ 1659d43f915SAbhishek Sahu #define BAM_MODE_EN BIT(0) 1669d43f915SAbhishek Sahu 167c76b78d8SArchit Taneja /* 168c76b78d8SArchit Taneja * the NAND controller performs reads/writes with ECC in 516 byte chunks. 
169c76b78d8SArchit Taneja * the driver calls the chunks 'step' or 'codeword' interchangeably 170c76b78d8SArchit Taneja */ 171c76b78d8SArchit Taneja #define NANDC_STEP_SIZE 512 172c76b78d8SArchit Taneja 173c76b78d8SArchit Taneja /* 174c76b78d8SArchit Taneja * the largest page size we support is 8K, this will have 16 steps/codewords 175c76b78d8SArchit Taneja * of 512 bytes each 176c76b78d8SArchit Taneja */ 177c76b78d8SArchit Taneja #define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE) 178c76b78d8SArchit Taneja 179c76b78d8SArchit Taneja /* we read at most 3 registers per codeword scan */ 180c76b78d8SArchit Taneja #define MAX_REG_RD (3 * MAX_NUM_STEPS) 181c76b78d8SArchit Taneja 182c76b78d8SArchit Taneja /* ECC modes supported by the controller */ 183c76b78d8SArchit Taneja #define ECC_NONE BIT(0) 184c76b78d8SArchit Taneja #define ECC_RS_4BIT BIT(1) 185c76b78d8SArchit Taneja #define ECC_BCH_4BIT BIT(2) 186c76b78d8SArchit Taneja #define ECC_BCH_8BIT BIT(3) 187c76b78d8SArchit Taneja 188e7a307f2SMd Sadre Alam #define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \ 189e7a307f2SMd Sadre Alam nandc_set_reg(chip, reg, \ 190622d3fc8SMd Sadre Alam ((cw_offset) << READ_LOCATION_OFFSET) | \ 191622d3fc8SMd Sadre Alam ((read_size) << READ_LOCATION_SIZE) | \ 192622d3fc8SMd Sadre Alam ((is_last_read_loc) << READ_LOCATION_LAST)) 19391af95c1SAbhishek Sahu 194503ee5aaSMd Sadre Alam #define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \ 195503ee5aaSMd Sadre Alam nandc_set_reg(chip, reg, \ 196503ee5aaSMd Sadre Alam ((cw_offset) << READ_LOCATION_OFFSET) | \ 197503ee5aaSMd Sadre Alam ((read_size) << READ_LOCATION_SIZE) | \ 198503ee5aaSMd Sadre Alam ((is_last_read_loc) << READ_LOCATION_LAST)) 199cc409b9aSAbhishek Sahu /* 200cc409b9aSAbhishek Sahu * Returns the actual register address for all NAND_DEV_ registers 201cc409b9aSAbhishek Sahu * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD) 202cc409b9aSAbhishek Sahu */ 203cc409b9aSAbhishek Sahu #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) 204cc409b9aSAbhishek Sahu 2058d6b6d7eSAbhishek Sahu /* Returns the NAND register physical address */ 2068d6b6d7eSAbhishek Sahu #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) 2078d6b6d7eSAbhishek Sahu 2088d6b6d7eSAbhishek Sahu /* Returns the dma address for reg read buffer */ 2098d6b6d7eSAbhishek Sahu #define reg_buf_dma_addr(chip, vaddr) \ 2108d6b6d7eSAbhishek Sahu ((chip)->reg_read_dma + \ 2118d6b6d7eSAbhishek Sahu ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) 2128d6b6d7eSAbhishek Sahu 2138c4cdce8SAbhishek Sahu #define QPIC_PER_CW_CMD_ELEMENTS 32 214cb80f114SAbhishek Sahu #define QPIC_PER_CW_CMD_SGL 32 215cb80f114SAbhishek Sahu #define QPIC_PER_CW_DATA_SGL 8 216cb80f114SAbhishek Sahu 2176f20070dSAbhishek Sahu #define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000) 2186f20070dSAbhishek Sahu 219cb80f114SAbhishek Sahu /* 22067e830aeSAbhishek Sahu * Flags used in DMA descriptor preparation helper functions 22167e830aeSAbhishek Sahu * (i.e. 
read_reg_dma/write_reg_dma/read_data_dma/write_data_dma) 22267e830aeSAbhishek Sahu */ 22367e830aeSAbhishek Sahu /* Don't set the EOT in current tx BAM sgl */ 22467e830aeSAbhishek Sahu #define NAND_BAM_NO_EOT BIT(0) 22567e830aeSAbhishek Sahu /* Set the NWD flag in current BAM sgl */ 22667e830aeSAbhishek Sahu #define NAND_BAM_NWD BIT(1) 22767e830aeSAbhishek Sahu /* Finish writing in the current BAM sgl and start writing in another BAM sgl */ 22867e830aeSAbhishek Sahu #define NAND_BAM_NEXT_SGL BIT(2) 229a86b9c4fSAbhishek Sahu /* 230a86b9c4fSAbhishek Sahu * Erased codeword status is being used two times in single transfer so this 231a86b9c4fSAbhishek Sahu * flag will determine the current value of erased codeword status register 232a86b9c4fSAbhishek Sahu */ 233a86b9c4fSAbhishek Sahu #define NAND_ERASED_CW_SET BIT(4) 23467e830aeSAbhishek Sahu 23567e830aeSAbhishek Sahu /* 236cb80f114SAbhishek Sahu * This data type corresponds to the BAM transaction which will be used for all 237cb80f114SAbhishek Sahu * NAND transfers. 2388c4cdce8SAbhishek Sahu * @bam_ce - the array of BAM command elements 239cb80f114SAbhishek Sahu * @cmd_sgl - sgl for NAND BAM command pipe 240cb80f114SAbhishek Sahu * @data_sgl - sgl for NAND BAM consumer/producer pipe 2418c4cdce8SAbhishek Sahu * @bam_ce_pos - the index in bam_ce which is available for next sgl 2428c4cdce8SAbhishek Sahu * @bam_ce_start - the index in bam_ce which marks the start position ce 2438c4cdce8SAbhishek Sahu * for current sgl. It will be used for size calculation 2448c4cdce8SAbhishek Sahu * for current sgl 245cb80f114SAbhishek Sahu * @cmd_sgl_pos - current index in command sgl. 246cb80f114SAbhishek Sahu * @cmd_sgl_start - start index in command sgl. 247cb80f114SAbhishek Sahu * @tx_sgl_pos - current index in data sgl for tx. 248cb80f114SAbhishek Sahu * @tx_sgl_start - start index in data sgl for tx. 249cb80f114SAbhishek Sahu * @rx_sgl_pos - current index in data sgl for rx. 250cb80f114SAbhishek Sahu * @rx_sgl_start - start index in data sgl for rx. 2516f20070dSAbhishek Sahu * @wait_second_completion - wait for second DMA desc completion before making 2526f20070dSAbhishek Sahu * the NAND transfer completion. 2536f20070dSAbhishek Sahu * @txn_done - completion for NAND transfer. 2546f20070dSAbhishek Sahu * @last_data_desc - last DMA desc in data channel (tx/rx). 2556f20070dSAbhishek Sahu * @last_cmd_desc - last DMA desc in command channel. 
256cb80f114SAbhishek Sahu */ 257cb80f114SAbhishek Sahu struct bam_transaction { 2588c4cdce8SAbhishek Sahu struct bam_cmd_element *bam_ce; 259cb80f114SAbhishek Sahu struct scatterlist *cmd_sgl; 260cb80f114SAbhishek Sahu struct scatterlist *data_sgl; 2618c4cdce8SAbhishek Sahu u32 bam_ce_pos; 2628c4cdce8SAbhishek Sahu u32 bam_ce_start; 263cb80f114SAbhishek Sahu u32 cmd_sgl_pos; 264cb80f114SAbhishek Sahu u32 cmd_sgl_start; 265cb80f114SAbhishek Sahu u32 tx_sgl_pos; 266cb80f114SAbhishek Sahu u32 tx_sgl_start; 267cb80f114SAbhishek Sahu u32 rx_sgl_pos; 268cb80f114SAbhishek Sahu u32 rx_sgl_start; 2696f20070dSAbhishek Sahu bool wait_second_completion; 2706f20070dSAbhishek Sahu struct completion txn_done; 2716f20070dSAbhishek Sahu struct dma_async_tx_descriptor *last_data_desc; 2726f20070dSAbhishek Sahu struct dma_async_tx_descriptor *last_cmd_desc; 273cb80f114SAbhishek Sahu }; 274cb80f114SAbhishek Sahu 275381dd245SAbhishek Sahu /* 276381dd245SAbhishek Sahu * This data type corresponds to the nand dma descriptor 277381dd245SAbhishek Sahu * @list - list for desc_info 278381dd245SAbhishek Sahu * @dir - DMA transfer direction 279381dd245SAbhishek Sahu * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by 280381dd245SAbhishek Sahu * ADM 281381dd245SAbhishek Sahu * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM 282381dd245SAbhishek Sahu * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM 283381dd245SAbhishek Sahu * @dma_desc - low level DMA engine descriptor 284381dd245SAbhishek Sahu */ 285c76b78d8SArchit Taneja struct desc_info { 286c76b78d8SArchit Taneja struct list_head node; 287c76b78d8SArchit Taneja 288c76b78d8SArchit Taneja enum dma_data_direction dir; 289381dd245SAbhishek Sahu union { 290381dd245SAbhishek Sahu struct scatterlist adm_sgl; 291381dd245SAbhishek Sahu struct { 292381dd245SAbhishek Sahu struct scatterlist *bam_sgl; 293381dd245SAbhishek Sahu int sgl_cnt; 294381dd245SAbhishek Sahu }; 295381dd245SAbhishek Sahu }; 296c76b78d8SArchit Taneja struct dma_async_tx_descriptor *dma_desc; 297c76b78d8SArchit Taneja }; 298c76b78d8SArchit Taneja 299c76b78d8SArchit Taneja /* 300c76b78d8SArchit Taneja * holds the current register values that we want to write. acts as a contiguous 301c76b78d8SArchit Taneja * chunk of memory which we use to write the controller registers through DMA. 
302c76b78d8SArchit Taneja */ 303c76b78d8SArchit Taneja struct nandc_regs { 304c76b78d8SArchit Taneja __le32 cmd; 305c76b78d8SArchit Taneja __le32 addr0; 306c76b78d8SArchit Taneja __le32 addr1; 307c76b78d8SArchit Taneja __le32 chip_sel; 308c76b78d8SArchit Taneja __le32 exec; 309c76b78d8SArchit Taneja 310c76b78d8SArchit Taneja __le32 cfg0; 311c76b78d8SArchit Taneja __le32 cfg1; 312c76b78d8SArchit Taneja __le32 ecc_bch_cfg; 313c76b78d8SArchit Taneja 314c76b78d8SArchit Taneja __le32 clrflashstatus; 315c76b78d8SArchit Taneja __le32 clrreadstatus; 316c76b78d8SArchit Taneja 317c76b78d8SArchit Taneja __le32 cmd1; 318c76b78d8SArchit Taneja __le32 vld; 319c76b78d8SArchit Taneja 320c76b78d8SArchit Taneja __le32 orig_cmd1; 321c76b78d8SArchit Taneja __le32 orig_vld; 322c76b78d8SArchit Taneja 323c76b78d8SArchit Taneja __le32 ecc_buf_cfg; 32491af95c1SAbhishek Sahu __le32 read_location0; 32591af95c1SAbhishek Sahu __le32 read_location1; 32691af95c1SAbhishek Sahu __le32 read_location2; 32791af95c1SAbhishek Sahu __le32 read_location3; 328503ee5aaSMd Sadre Alam __le32 read_location_last0; 329503ee5aaSMd Sadre Alam __le32 read_location_last1; 330503ee5aaSMd Sadre Alam __le32 read_location_last2; 331503ee5aaSMd Sadre Alam __le32 read_location_last3; 33291af95c1SAbhishek Sahu 333a86b9c4fSAbhishek Sahu __le32 erased_cw_detect_cfg_clr; 334a86b9c4fSAbhishek Sahu __le32 erased_cw_detect_cfg_set; 335c76b78d8SArchit Taneja }; 336c76b78d8SArchit Taneja 337c76b78d8SArchit Taneja /* 338c76b78d8SArchit Taneja * NAND controller data struct 339c76b78d8SArchit Taneja * 340c76b78d8SArchit Taneja * @controller: base controller structure 341c76b78d8SArchit Taneja * @host_list: list containing all the chips attached to the 342c76b78d8SArchit Taneja * controller 343c76b78d8SArchit Taneja * @dev: parent device 344c76b78d8SArchit Taneja * @base: MMIO base 3458d6b6d7eSAbhishek Sahu * @base_phys: physical base address of controller registers 3468d6b6d7eSAbhishek Sahu * @base_dma: dma base address of controller registers 347c76b78d8SArchit Taneja * @core_clk: controller clock 348c76b78d8SArchit Taneja * @aon_clk: another controller clock 349c76b78d8SArchit Taneja * 350c76b78d8SArchit Taneja * @chan: dma channel 351c76b78d8SArchit Taneja * @cmd_crci: ADM DMA CRCI for command flow control 352c76b78d8SArchit Taneja * @data_crci: ADM DMA CRCI for data flow control 353c76b78d8SArchit Taneja * @desc_list: DMA descriptor list (list of desc_infos) 354c76b78d8SArchit Taneja * 355c76b78d8SArchit Taneja * @data_buffer: our local DMA buffer for page read/writes, 356c76b78d8SArchit Taneja * used when we can't use the buffer provided 357c76b78d8SArchit Taneja * by upper layers directly 358716bbbabSBoris Brezillon * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf 359716bbbabSBoris Brezillon * functions 360c76b78d8SArchit Taneja * @reg_read_buf: local buffer for reading back registers via DMA 3616192ff7aSAbhishek Sahu * @reg_read_dma: contains dma address for register read buffer 362c76b78d8SArchit Taneja * @reg_read_pos: marker for data read in reg_read_buf 363c76b78d8SArchit Taneja * 364c76b78d8SArchit Taneja * @regs: a contiguous chunk of memory for DMA register 365c76b78d8SArchit Taneja * writes. 
contains the register values to be 366c76b78d8SArchit Taneja * written to controller 367c76b78d8SArchit Taneja * @cmd1/vld: some fixed controller register values 36858f1f22aSAbhishek Sahu * @props: properties of current NAND controller, 369c76b78d8SArchit Taneja * initialized via DT match data 370cb80f114SAbhishek Sahu * @max_cwperpage: maximum QPIC codewords required. calculated 371cb80f114SAbhishek Sahu * from all connected NAND devices pagesize 372c76b78d8SArchit Taneja */ 373c76b78d8SArchit Taneja struct qcom_nand_controller { 3747da45139SMiquel Raynal struct nand_controller controller; 375c76b78d8SArchit Taneja struct list_head host_list; 376c76b78d8SArchit Taneja 377c76b78d8SArchit Taneja struct device *dev; 378c76b78d8SArchit Taneja 379c76b78d8SArchit Taneja void __iomem *base; 3808d6b6d7eSAbhishek Sahu phys_addr_t base_phys; 381c76b78d8SArchit Taneja dma_addr_t base_dma; 382c76b78d8SArchit Taneja 383c76b78d8SArchit Taneja struct clk *core_clk; 384c76b78d8SArchit Taneja struct clk *aon_clk; 385c76b78d8SArchit Taneja 386497d7d85SAbhishek Sahu union { 387497d7d85SAbhishek Sahu /* will be used only by QPIC for BAM DMA */ 388497d7d85SAbhishek Sahu struct { 389497d7d85SAbhishek Sahu struct dma_chan *tx_chan; 390497d7d85SAbhishek Sahu struct dma_chan *rx_chan; 391497d7d85SAbhishek Sahu struct dma_chan *cmd_chan; 392497d7d85SAbhishek Sahu }; 393497d7d85SAbhishek Sahu 394497d7d85SAbhishek Sahu /* will be used only by EBI2 for ADM DMA */ 395497d7d85SAbhishek Sahu struct { 396c76b78d8SArchit Taneja struct dma_chan *chan; 397c76b78d8SArchit Taneja unsigned int cmd_crci; 398c76b78d8SArchit Taneja unsigned int data_crci; 399497d7d85SAbhishek Sahu }; 400497d7d85SAbhishek Sahu }; 401497d7d85SAbhishek Sahu 402c76b78d8SArchit Taneja struct list_head desc_list; 403cb80f114SAbhishek Sahu struct bam_transaction *bam_txn; 404c76b78d8SArchit Taneja 405c76b78d8SArchit Taneja u8 *data_buffer; 406c76b78d8SArchit Taneja int buf_size; 407c76b78d8SArchit Taneja int buf_count; 408c76b78d8SArchit Taneja int buf_start; 409cb80f114SAbhishek Sahu unsigned int max_cwperpage; 410c76b78d8SArchit Taneja 411c76b78d8SArchit Taneja __le32 *reg_read_buf; 4126192ff7aSAbhishek Sahu dma_addr_t reg_read_dma; 413c76b78d8SArchit Taneja int reg_read_pos; 414c76b78d8SArchit Taneja 415c76b78d8SArchit Taneja struct nandc_regs *regs; 416c76b78d8SArchit Taneja 417c76b78d8SArchit Taneja u32 cmd1, vld; 41858f1f22aSAbhishek Sahu const struct qcom_nandc_props *props; 419c76b78d8SArchit Taneja }; 420c76b78d8SArchit Taneja 421c76b78d8SArchit Taneja /* 422c76b78d8SArchit Taneja * NAND chip structure 423c76b78d8SArchit Taneja * 424c76b78d8SArchit Taneja * @chip: base NAND chip structure 425c76b78d8SArchit Taneja * @node: list node to add itself to host_list in 426c76b78d8SArchit Taneja * qcom_nand_controller 427c76b78d8SArchit Taneja * 428c76b78d8SArchit Taneja * @cs: chip select value for this chip 429c76b78d8SArchit Taneja * @cw_size: the number of bytes in a single step/codeword 430c76b78d8SArchit Taneja * of a page, consisting of all data, ecc, spare 431c76b78d8SArchit Taneja * and reserved bytes 432c76b78d8SArchit Taneja * @cw_data: the number of bytes within a codeword protected 433c76b78d8SArchit Taneja * by ECC 434c76b78d8SArchit Taneja * @use_ecc: request the controller to use ECC for the 435c76b78d8SArchit Taneja * upcoming read/write 436c76b78d8SArchit Taneja * @bch_enabled: flag to tell whether BCH ECC mode is used 437c76b78d8SArchit Taneja * @ecc_bytes_hw: ECC bytes used by controller hardware for this 438c76b78d8SArchit Taneja * 
chip 439c76b78d8SArchit Taneja * @status: value to be returned if NAND_CMD_STATUS command 440c76b78d8SArchit Taneja * is executed 441c76b78d8SArchit Taneja * @last_command: keeps track of last command on this chip. used 442c76b78d8SArchit Taneja * for reading correct status 443c76b78d8SArchit Taneja * 444c76b78d8SArchit Taneja * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for 445c76b78d8SArchit Taneja * ecc/non-ecc mode for the current nand flash 446c76b78d8SArchit Taneja * device 447c76b78d8SArchit Taneja */ 448c76b78d8SArchit Taneja struct qcom_nand_host { 449c76b78d8SArchit Taneja struct nand_chip chip; 450c76b78d8SArchit Taneja struct list_head node; 451c76b78d8SArchit Taneja 452c76b78d8SArchit Taneja int cs; 453c76b78d8SArchit Taneja int cw_size; 454c76b78d8SArchit Taneja int cw_data; 455c76b78d8SArchit Taneja bool use_ecc; 456c76b78d8SArchit Taneja bool bch_enabled; 457c76b78d8SArchit Taneja int ecc_bytes_hw; 458c76b78d8SArchit Taneja int spare_bytes; 459c76b78d8SArchit Taneja int bbm_size; 460c76b78d8SArchit Taneja u8 status; 461c76b78d8SArchit Taneja int last_command; 462c76b78d8SArchit Taneja 463c76b78d8SArchit Taneja u32 cfg0, cfg1; 464c76b78d8SArchit Taneja u32 cfg0_raw, cfg1_raw; 465c76b78d8SArchit Taneja u32 ecc_buf_cfg; 466c76b78d8SArchit Taneja u32 ecc_bch_cfg; 467c76b78d8SArchit Taneja u32 clrflashstatus; 468c76b78d8SArchit Taneja u32 clrreadstatus; 469c76b78d8SArchit Taneja }; 470c76b78d8SArchit Taneja 47158f1f22aSAbhishek Sahu /* 47258f1f22aSAbhishek Sahu * This data type corresponds to the NAND controller properties which varies 47358f1f22aSAbhishek Sahu * among different NAND controllers. 47458f1f22aSAbhishek Sahu * @ecc_modes - ecc mode for NAND 4758c5d5d6aSAbhishek Sahu * @is_bam - whether NAND controller is using BAM 476443440ccSSivaprakash Murugesan * @is_qpic - whether NAND CTRL is part of qpic IP 477b1209582SManivannan Sadhasivam * @qpic_v2 - flag to indicate QPIC IP version 2 478cc409b9aSAbhishek Sahu * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset 47958f1f22aSAbhishek Sahu */ 48058f1f22aSAbhishek Sahu struct qcom_nandc_props { 48158f1f22aSAbhishek Sahu u32 ecc_modes; 4828c5d5d6aSAbhishek Sahu bool is_bam; 483443440ccSSivaprakash Murugesan bool is_qpic; 484b1209582SManivannan Sadhasivam bool qpic_v2; 485cc409b9aSAbhishek Sahu u32 dev_cmd_reg_start; 48658f1f22aSAbhishek Sahu }; 48758f1f22aSAbhishek Sahu 488cb80f114SAbhishek Sahu /* Frees the BAM transaction memory */ 489cb80f114SAbhishek Sahu static void free_bam_transaction(struct qcom_nand_controller *nandc) 490cb80f114SAbhishek Sahu { 491cb80f114SAbhishek Sahu struct bam_transaction *bam_txn = nandc->bam_txn; 492cb80f114SAbhishek Sahu 493cb80f114SAbhishek Sahu devm_kfree(nandc->dev, bam_txn); 494cb80f114SAbhishek Sahu } 495cb80f114SAbhishek Sahu 496cb80f114SAbhishek Sahu /* Allocates and Initializes the BAM transaction */ 497cb80f114SAbhishek Sahu static struct bam_transaction * 498cb80f114SAbhishek Sahu alloc_bam_transaction(struct qcom_nand_controller *nandc) 499cb80f114SAbhishek Sahu { 500cb80f114SAbhishek Sahu struct bam_transaction *bam_txn; 501cb80f114SAbhishek Sahu size_t bam_txn_size; 502cb80f114SAbhishek Sahu unsigned int num_cw = nandc->max_cwperpage; 503cb80f114SAbhishek Sahu void *bam_txn_buf; 504cb80f114SAbhishek Sahu 505cb80f114SAbhishek Sahu bam_txn_size = 506cb80f114SAbhishek Sahu sizeof(*bam_txn) + num_cw * 5078c4cdce8SAbhishek Sahu ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) + 5088c4cdce8SAbhishek Sahu (sizeof(*bam_txn->cmd_sgl) * 
QPIC_PER_CW_CMD_SGL) + 509cb80f114SAbhishek Sahu (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); 510cb80f114SAbhishek Sahu 511cb80f114SAbhishek Sahu bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); 512cb80f114SAbhishek Sahu if (!bam_txn_buf) 513cb80f114SAbhishek Sahu return NULL; 514cb80f114SAbhishek Sahu 515cb80f114SAbhishek Sahu bam_txn = bam_txn_buf; 516cb80f114SAbhishek Sahu bam_txn_buf += sizeof(*bam_txn); 517cb80f114SAbhishek Sahu 5188c4cdce8SAbhishek Sahu bam_txn->bam_ce = bam_txn_buf; 5198c4cdce8SAbhishek Sahu bam_txn_buf += 5208c4cdce8SAbhishek Sahu sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; 5218c4cdce8SAbhishek Sahu 522cb80f114SAbhishek Sahu bam_txn->cmd_sgl = bam_txn_buf; 523cb80f114SAbhishek Sahu bam_txn_buf += 524cb80f114SAbhishek Sahu sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; 525cb80f114SAbhishek Sahu 526cb80f114SAbhishek Sahu bam_txn->data_sgl = bam_txn_buf; 527cb80f114SAbhishek Sahu 5286f20070dSAbhishek Sahu init_completion(&bam_txn->txn_done); 5296f20070dSAbhishek Sahu 530cb80f114SAbhishek Sahu return bam_txn; 531cb80f114SAbhishek Sahu } 532cb80f114SAbhishek Sahu 5334e2f6c52SAbhishek Sahu /* Clears the BAM transaction indexes */ 5344e2f6c52SAbhishek Sahu static void clear_bam_transaction(struct qcom_nand_controller *nandc) 5354e2f6c52SAbhishek Sahu { 5364e2f6c52SAbhishek Sahu struct bam_transaction *bam_txn = nandc->bam_txn; 5374e2f6c52SAbhishek Sahu 5384e2f6c52SAbhishek Sahu if (!nandc->props->is_bam) 5394e2f6c52SAbhishek Sahu return; 5404e2f6c52SAbhishek Sahu 5418c4cdce8SAbhishek Sahu bam_txn->bam_ce_pos = 0; 5428c4cdce8SAbhishek Sahu bam_txn->bam_ce_start = 0; 5434e2f6c52SAbhishek Sahu bam_txn->cmd_sgl_pos = 0; 5444e2f6c52SAbhishek Sahu bam_txn->cmd_sgl_start = 0; 5454e2f6c52SAbhishek Sahu bam_txn->tx_sgl_pos = 0; 5464e2f6c52SAbhishek Sahu bam_txn->tx_sgl_start = 0; 5474e2f6c52SAbhishek Sahu bam_txn->rx_sgl_pos = 0; 5484e2f6c52SAbhishek Sahu bam_txn->rx_sgl_start = 0; 5496f20070dSAbhishek Sahu bam_txn->last_data_desc = NULL; 5506f20070dSAbhishek Sahu bam_txn->wait_second_completion = false; 5514e2f6c52SAbhishek Sahu 5524e2f6c52SAbhishek Sahu sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * 5534e2f6c52SAbhishek Sahu QPIC_PER_CW_CMD_SGL); 5544e2f6c52SAbhishek Sahu sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage * 5554e2f6c52SAbhishek Sahu QPIC_PER_CW_DATA_SGL); 5566f20070dSAbhishek Sahu 5576f20070dSAbhishek Sahu reinit_completion(&bam_txn->txn_done); 5586f20070dSAbhishek Sahu } 5596f20070dSAbhishek Sahu 5606f20070dSAbhishek Sahu /* Callback for DMA descriptor completion */ 5616f20070dSAbhishek Sahu static void qpic_bam_dma_done(void *data) 5626f20070dSAbhishek Sahu { 5636f20070dSAbhishek Sahu struct bam_transaction *bam_txn = data; 5646f20070dSAbhishek Sahu 5656f20070dSAbhishek Sahu /* 5666f20070dSAbhishek Sahu * In case of data transfer with NAND, 2 callbacks will be generated. 5676f20070dSAbhishek Sahu * One for command channel and another one for data channel. 5686f20070dSAbhishek Sahu * If current transaction has data descriptors 5696f20070dSAbhishek Sahu * (i.e. wait_second_completion is true), then set this to false 5706f20070dSAbhishek Sahu * and wait for second DMA descriptor completion. 
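 * Otherwise this is either the only callback (command-only transfer) or the second one, so complete txn_done to finish the NAND transfer.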
5716f20070dSAbhishek Sahu */ 5726f20070dSAbhishek Sahu if (bam_txn->wait_second_completion) 5736f20070dSAbhishek Sahu bam_txn->wait_second_completion = false; 5746f20070dSAbhishek Sahu else 5756f20070dSAbhishek Sahu complete(&bam_txn->txn_done); 5764e2f6c52SAbhishek Sahu } 5774e2f6c52SAbhishek Sahu 578c76b78d8SArchit Taneja static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) 579c76b78d8SArchit Taneja { 580c76b78d8SArchit Taneja return container_of(chip, struct qcom_nand_host, chip); 581c76b78d8SArchit Taneja } 582c76b78d8SArchit Taneja 583c76b78d8SArchit Taneja static inline struct qcom_nand_controller * 584c76b78d8SArchit Taneja get_qcom_nand_controller(struct nand_chip *chip) 585c76b78d8SArchit Taneja { 586c76b78d8SArchit Taneja return container_of(chip->controller, struct qcom_nand_controller, 587c76b78d8SArchit Taneja controller); 588c76b78d8SArchit Taneja } 589c76b78d8SArchit Taneja 590c76b78d8SArchit Taneja static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset) 591c76b78d8SArchit Taneja { 592c76b78d8SArchit Taneja return ioread32(nandc->base + offset); 593c76b78d8SArchit Taneja } 594c76b78d8SArchit Taneja 595c76b78d8SArchit Taneja static inline void nandc_write(struct qcom_nand_controller *nandc, int offset, 596c76b78d8SArchit Taneja u32 val) 597c76b78d8SArchit Taneja { 598c76b78d8SArchit Taneja iowrite32(val, nandc->base + offset); 599c76b78d8SArchit Taneja } 600c76b78d8SArchit Taneja 6016192ff7aSAbhishek Sahu static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc, 6026192ff7aSAbhishek Sahu bool is_cpu) 6036192ff7aSAbhishek Sahu { 6046192ff7aSAbhishek Sahu if (!nandc->props->is_bam) 6056192ff7aSAbhishek Sahu return; 6066192ff7aSAbhishek Sahu 6076192ff7aSAbhishek Sahu if (is_cpu) 6086192ff7aSAbhishek Sahu dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma, 6096192ff7aSAbhishek Sahu MAX_REG_RD * 6106192ff7aSAbhishek Sahu sizeof(*nandc->reg_read_buf), 6116192ff7aSAbhishek Sahu DMA_FROM_DEVICE); 6126192ff7aSAbhishek Sahu else 6136192ff7aSAbhishek Sahu dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma, 6146192ff7aSAbhishek Sahu MAX_REG_RD * 6156192ff7aSAbhishek Sahu sizeof(*nandc->reg_read_buf), 6166192ff7aSAbhishek Sahu DMA_FROM_DEVICE); 6176192ff7aSAbhishek Sahu } 6186192ff7aSAbhishek Sahu 619c76b78d8SArchit Taneja static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset) 620c76b78d8SArchit Taneja { 621c76b78d8SArchit Taneja switch (offset) { 622c76b78d8SArchit Taneja case NAND_FLASH_CMD: 623c76b78d8SArchit Taneja return ®s->cmd; 624c76b78d8SArchit Taneja case NAND_ADDR0: 625c76b78d8SArchit Taneja return ®s->addr0; 626c76b78d8SArchit Taneja case NAND_ADDR1: 627c76b78d8SArchit Taneja return ®s->addr1; 628c76b78d8SArchit Taneja case NAND_FLASH_CHIP_SELECT: 629c76b78d8SArchit Taneja return ®s->chip_sel; 630c76b78d8SArchit Taneja case NAND_EXEC_CMD: 631c76b78d8SArchit Taneja return ®s->exec; 632c76b78d8SArchit Taneja case NAND_FLASH_STATUS: 633c76b78d8SArchit Taneja return ®s->clrflashstatus; 634c76b78d8SArchit Taneja case NAND_DEV0_CFG0: 635c76b78d8SArchit Taneja return ®s->cfg0; 636c76b78d8SArchit Taneja case NAND_DEV0_CFG1: 637c76b78d8SArchit Taneja return ®s->cfg1; 638c76b78d8SArchit Taneja case NAND_DEV0_ECC_CFG: 639c76b78d8SArchit Taneja return ®s->ecc_bch_cfg; 640c76b78d8SArchit Taneja case NAND_READ_STATUS: 641c76b78d8SArchit Taneja return ®s->clrreadstatus; 642c76b78d8SArchit Taneja case NAND_DEV_CMD1: 643c76b78d8SArchit Taneja return ®s->cmd1; 644c76b78d8SArchit Taneja case 
NAND_DEV_CMD1_RESTORE: 645c76b78d8SArchit Taneja return ®s->orig_cmd1; 646c76b78d8SArchit Taneja case NAND_DEV_CMD_VLD: 647c76b78d8SArchit Taneja return ®s->vld; 648c76b78d8SArchit Taneja case NAND_DEV_CMD_VLD_RESTORE: 649c76b78d8SArchit Taneja return ®s->orig_vld; 650c76b78d8SArchit Taneja case NAND_EBI2_ECC_BUF_CFG: 651c76b78d8SArchit Taneja return ®s->ecc_buf_cfg; 65291af95c1SAbhishek Sahu case NAND_READ_LOCATION_0: 65391af95c1SAbhishek Sahu return ®s->read_location0; 65491af95c1SAbhishek Sahu case NAND_READ_LOCATION_1: 65591af95c1SAbhishek Sahu return ®s->read_location1; 65691af95c1SAbhishek Sahu case NAND_READ_LOCATION_2: 65791af95c1SAbhishek Sahu return ®s->read_location2; 65891af95c1SAbhishek Sahu case NAND_READ_LOCATION_3: 65991af95c1SAbhishek Sahu return ®s->read_location3; 660503ee5aaSMd Sadre Alam case NAND_READ_LOCATION_LAST_CW_0: 661503ee5aaSMd Sadre Alam return ®s->read_location_last0; 662503ee5aaSMd Sadre Alam case NAND_READ_LOCATION_LAST_CW_1: 663503ee5aaSMd Sadre Alam return ®s->read_location_last1; 664503ee5aaSMd Sadre Alam case NAND_READ_LOCATION_LAST_CW_2: 665503ee5aaSMd Sadre Alam return ®s->read_location_last2; 666503ee5aaSMd Sadre Alam case NAND_READ_LOCATION_LAST_CW_3: 667503ee5aaSMd Sadre Alam return ®s->read_location_last3; 668c76b78d8SArchit Taneja default: 669c76b78d8SArchit Taneja return NULL; 670c76b78d8SArchit Taneja } 671c76b78d8SArchit Taneja } 672c76b78d8SArchit Taneja 6739a7c39e2SMd Sadre Alam static void nandc_set_reg(struct nand_chip *chip, int offset, 674c76b78d8SArchit Taneja u32 val) 675c76b78d8SArchit Taneja { 6769a7c39e2SMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 677c76b78d8SArchit Taneja struct nandc_regs *regs = nandc->regs; 678c76b78d8SArchit Taneja __le32 *reg; 679c76b78d8SArchit Taneja 680c76b78d8SArchit Taneja reg = offset_to_nandc_reg(regs, offset); 681c76b78d8SArchit Taneja 682c76b78d8SArchit Taneja if (reg) 683c76b78d8SArchit Taneja *reg = cpu_to_le32(val); 684c76b78d8SArchit Taneja } 685c76b78d8SArchit Taneja 686b057e498SMd Sadre Alam /* Helper to check the code word, whether it is last cw or not */ 687b057e498SMd Sadre Alam static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw) 688b057e498SMd Sadre Alam { 689b057e498SMd Sadre Alam return cw == (ecc->steps - 1); 690b057e498SMd Sadre Alam } 691b057e498SMd Sadre Alam 692e7a307f2SMd Sadre Alam /* helper to configure location register values */ 693e7a307f2SMd Sadre Alam static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg, 694e7a307f2SMd Sadre Alam int cw_offset, int read_size, int is_last_read_loc) 695e7a307f2SMd Sadre Alam { 696503ee5aaSMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 697503ee5aaSMd Sadre Alam struct nand_ecc_ctrl *ecc = &chip->ecc; 698e7a307f2SMd Sadre Alam int reg_base = NAND_READ_LOCATION_0; 699e7a307f2SMd Sadre Alam 700503ee5aaSMd Sadre Alam if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw)) 701503ee5aaSMd Sadre Alam reg_base = NAND_READ_LOCATION_LAST_CW_0; 702503ee5aaSMd Sadre Alam 703e7a307f2SMd Sadre Alam reg_base += reg * 4; 704e7a307f2SMd Sadre Alam 705503ee5aaSMd Sadre Alam if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw)) 706503ee5aaSMd Sadre Alam return nandc_set_read_loc_last(chip, reg_base, cw_offset, 707503ee5aaSMd Sadre Alam read_size, is_last_read_loc); 708503ee5aaSMd Sadre Alam else 709e7a307f2SMd Sadre Alam return nandc_set_read_loc_first(chip, reg_base, cw_offset, 710e7a307f2SMd Sadre Alam read_size, is_last_read_loc); 
711e7a307f2SMd Sadre Alam } 712e7a307f2SMd Sadre Alam 713c76b78d8SArchit Taneja /* helper to configure address register values */ 714c76b78d8SArchit Taneja static void set_address(struct qcom_nand_host *host, u16 column, int page) 715c76b78d8SArchit Taneja { 716c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 717c76b78d8SArchit Taneja 718c76b78d8SArchit Taneja if (chip->options & NAND_BUSWIDTH_16) 719c76b78d8SArchit Taneja column >>= 1; 720c76b78d8SArchit Taneja 7219a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR0, page << 16 | column); 7229a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff); 723c76b78d8SArchit Taneja } 724c76b78d8SArchit Taneja 725c76b78d8SArchit Taneja /* 726c76b78d8SArchit Taneja * update_rw_regs: set up read/write register values, these will be 727c76b78d8SArchit Taneja * written to the NAND controller registers via DMA 728c76b78d8SArchit Taneja * 729c76b78d8SArchit Taneja * @num_cw: number of steps for the read/write operation 730c76b78d8SArchit Taneja * @read: read or write operation 731503ee5aaSMd Sadre Alam * @cw : which code word 732c76b78d8SArchit Taneja */ 733503ee5aaSMd Sadre Alam static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw) 734c76b78d8SArchit Taneja { 735c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 736c76b78d8SArchit Taneja u32 cmd, cfg0, cfg1, ecc_bch_cfg; 737*bfb34eceSMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 738c76b78d8SArchit Taneja 739c76b78d8SArchit Taneja if (read) { 740c76b78d8SArchit Taneja if (host->use_ecc) 74133bf5519SOlof Johansson cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; 742c76b78d8SArchit Taneja else 74333bf5519SOlof Johansson cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE; 744c76b78d8SArchit Taneja } else { 74533bf5519SOlof Johansson cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; 746c76b78d8SArchit Taneja } 747c76b78d8SArchit Taneja 748c76b78d8SArchit Taneja if (host->use_ecc) { 749c76b78d8SArchit Taneja cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) | 750c76b78d8SArchit Taneja (num_cw - 1) << CW_PER_PAGE; 751c76b78d8SArchit Taneja 752c76b78d8SArchit Taneja cfg1 = host->cfg1; 753c76b78d8SArchit Taneja ecc_bch_cfg = host->ecc_bch_cfg; 754c76b78d8SArchit Taneja } else { 755c76b78d8SArchit Taneja cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) | 756c76b78d8SArchit Taneja (num_cw - 1) << CW_PER_PAGE; 757c76b78d8SArchit Taneja 758c76b78d8SArchit Taneja cfg1 = host->cfg1_raw; 759c76b78d8SArchit Taneja ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; 760c76b78d8SArchit Taneja } 761c76b78d8SArchit Taneja 7629a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CMD, cmd); 7639a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0); 7649a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1); 7659a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg); 766*bfb34eceSMd Sadre Alam if (!nandc->props->qpic_v2) 7679a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg); 7689a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus); 7699a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus); 7709a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EXEC_CMD, 1); 77191af95c1SAbhishek Sahu 77291af95c1SAbhishek Sahu if (read) 773503ee5aaSMd Sadre Alam nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ? 
77491af95c1SAbhishek Sahu host->cw_data : host->cw_size, 1); 775c76b78d8SArchit Taneja } 776c76b78d8SArchit Taneja 777381dd245SAbhishek Sahu /* 778381dd245SAbhishek Sahu * Maps the scatter gather list for DMA transfer and forms the DMA descriptor 779381dd245SAbhishek Sahu * for BAM. This descriptor will be added in the NAND DMA descriptor queue 780381dd245SAbhishek Sahu * which will be submitted to DMA engine. 781381dd245SAbhishek Sahu */ 782381dd245SAbhishek Sahu static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, 783381dd245SAbhishek Sahu struct dma_chan *chan, 784381dd245SAbhishek Sahu unsigned long flags) 785381dd245SAbhishek Sahu { 786381dd245SAbhishek Sahu struct desc_info *desc; 787381dd245SAbhishek Sahu struct scatterlist *sgl; 788381dd245SAbhishek Sahu unsigned int sgl_cnt; 789381dd245SAbhishek Sahu int ret; 790381dd245SAbhishek Sahu struct bam_transaction *bam_txn = nandc->bam_txn; 791381dd245SAbhishek Sahu enum dma_transfer_direction dir_eng; 792381dd245SAbhishek Sahu struct dma_async_tx_descriptor *dma_desc; 793381dd245SAbhishek Sahu 794381dd245SAbhishek Sahu desc = kzalloc(sizeof(*desc), GFP_KERNEL); 795381dd245SAbhishek Sahu if (!desc) 796381dd245SAbhishek Sahu return -ENOMEM; 797381dd245SAbhishek Sahu 798381dd245SAbhishek Sahu if (chan == nandc->cmd_chan) { 799381dd245SAbhishek Sahu sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start]; 800381dd245SAbhishek Sahu sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start; 801381dd245SAbhishek Sahu bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos; 802381dd245SAbhishek Sahu dir_eng = DMA_MEM_TO_DEV; 803381dd245SAbhishek Sahu desc->dir = DMA_TO_DEVICE; 804381dd245SAbhishek Sahu } else if (chan == nandc->tx_chan) { 805381dd245SAbhishek Sahu sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start]; 806381dd245SAbhishek Sahu sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start; 807381dd245SAbhishek Sahu bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos; 808381dd245SAbhishek Sahu dir_eng = DMA_MEM_TO_DEV; 809381dd245SAbhishek Sahu desc->dir = DMA_TO_DEVICE; 810381dd245SAbhishek Sahu } else { 811381dd245SAbhishek Sahu sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start]; 812381dd245SAbhishek Sahu sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start; 813381dd245SAbhishek Sahu bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos; 814381dd245SAbhishek Sahu dir_eng = DMA_DEV_TO_MEM; 815381dd245SAbhishek Sahu desc->dir = DMA_FROM_DEVICE; 816381dd245SAbhishek Sahu } 817381dd245SAbhishek Sahu 818381dd245SAbhishek Sahu sg_mark_end(sgl + sgl_cnt - 1); 819381dd245SAbhishek Sahu ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir); 820381dd245SAbhishek Sahu if (ret == 0) { 821381dd245SAbhishek Sahu dev_err(nandc->dev, "failure in mapping desc\n"); 822381dd245SAbhishek Sahu kfree(desc); 823381dd245SAbhishek Sahu return -ENOMEM; 824381dd245SAbhishek Sahu } 825381dd245SAbhishek Sahu 826381dd245SAbhishek Sahu desc->sgl_cnt = sgl_cnt; 827381dd245SAbhishek Sahu desc->bam_sgl = sgl; 828381dd245SAbhishek Sahu 829381dd245SAbhishek Sahu dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng, 830381dd245SAbhishek Sahu flags); 831381dd245SAbhishek Sahu 832381dd245SAbhishek Sahu if (!dma_desc) { 833381dd245SAbhishek Sahu dev_err(nandc->dev, "failure in prep desc\n"); 834381dd245SAbhishek Sahu dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir); 835381dd245SAbhishek Sahu kfree(desc); 836381dd245SAbhishek Sahu return -EINVAL; 837381dd245SAbhishek Sahu } 838381dd245SAbhishek Sahu 839381dd245SAbhishek Sahu desc->dma_desc = dma_desc; 840381dd245SAbhishek Sahu 
8416f20070dSAbhishek Sahu /* update last data/command descriptor */ 8426f20070dSAbhishek Sahu if (chan == nandc->cmd_chan) 8436f20070dSAbhishek Sahu bam_txn->last_cmd_desc = dma_desc; 8446f20070dSAbhishek Sahu else 8456f20070dSAbhishek Sahu bam_txn->last_data_desc = dma_desc; 8466f20070dSAbhishek Sahu 847381dd245SAbhishek Sahu list_add_tail(&desc->node, &nandc->desc_list); 848381dd245SAbhishek Sahu 849381dd245SAbhishek Sahu return 0; 850381dd245SAbhishek Sahu } 851381dd245SAbhishek Sahu 8524e2f6c52SAbhishek Sahu /* 8538d6b6d7eSAbhishek Sahu * Prepares the command descriptor for BAM DMA which will be used for NAND 8548d6b6d7eSAbhishek Sahu * register reads and writes. The command descriptor requires the command 8558d6b6d7eSAbhishek Sahu * to be formed in command element type so this function uses the command 8568d6b6d7eSAbhishek Sahu * element from bam transaction ce array and fills the same with required 8578d6b6d7eSAbhishek Sahu * data. A single SGL can contain multiple command elements so 8588d6b6d7eSAbhishek Sahu * NAND_BAM_NEXT_SGL will be used for starting the separate SGL 8598d6b6d7eSAbhishek Sahu * after the current command element. 8608d6b6d7eSAbhishek Sahu */ 8618d6b6d7eSAbhishek Sahu static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, 8628d6b6d7eSAbhishek Sahu int reg_off, const void *vaddr, 8638d6b6d7eSAbhishek Sahu int size, unsigned int flags) 8648d6b6d7eSAbhishek Sahu { 8658d6b6d7eSAbhishek Sahu int bam_ce_size; 8668d6b6d7eSAbhishek Sahu int i, ret; 8678d6b6d7eSAbhishek Sahu struct bam_cmd_element *bam_ce_buffer; 8688d6b6d7eSAbhishek Sahu struct bam_transaction *bam_txn = nandc->bam_txn; 8698d6b6d7eSAbhishek Sahu 8708d6b6d7eSAbhishek Sahu bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; 8718d6b6d7eSAbhishek Sahu 8728d6b6d7eSAbhishek Sahu /* fill the command desc */ 8738d6b6d7eSAbhishek Sahu for (i = 0; i < size; i++) { 8748d6b6d7eSAbhishek Sahu if (read) 8758d6b6d7eSAbhishek Sahu bam_prep_ce(&bam_ce_buffer[i], 8768d6b6d7eSAbhishek Sahu nandc_reg_phys(nandc, reg_off + 4 * i), 8778d6b6d7eSAbhishek Sahu BAM_READ_COMMAND, 8788d6b6d7eSAbhishek Sahu reg_buf_dma_addr(nandc, 8798d6b6d7eSAbhishek Sahu (__le32 *)vaddr + i)); 8808d6b6d7eSAbhishek Sahu else 8818d6b6d7eSAbhishek Sahu bam_prep_ce_le32(&bam_ce_buffer[i], 8828d6b6d7eSAbhishek Sahu nandc_reg_phys(nandc, reg_off + 4 * i), 8838d6b6d7eSAbhishek Sahu BAM_WRITE_COMMAND, 8848d6b6d7eSAbhishek Sahu *((__le32 *)vaddr + i)); 8858d6b6d7eSAbhishek Sahu } 8868d6b6d7eSAbhishek Sahu 8878d6b6d7eSAbhishek Sahu bam_txn->bam_ce_pos += size; 8888d6b6d7eSAbhishek Sahu 8898d6b6d7eSAbhishek Sahu /* use the separate sgl after this command */ 8908d6b6d7eSAbhishek Sahu if (flags & NAND_BAM_NEXT_SGL) { 8918d6b6d7eSAbhishek Sahu bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; 8928d6b6d7eSAbhishek Sahu bam_ce_size = (bam_txn->bam_ce_pos - 8938d6b6d7eSAbhishek Sahu bam_txn->bam_ce_start) * 8948d6b6d7eSAbhishek Sahu sizeof(struct bam_cmd_element); 8958d6b6d7eSAbhishek Sahu sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos], 8968d6b6d7eSAbhishek Sahu bam_ce_buffer, bam_ce_size); 8978d6b6d7eSAbhishek Sahu bam_txn->cmd_sgl_pos++; 8988d6b6d7eSAbhishek Sahu bam_txn->bam_ce_start = bam_txn->bam_ce_pos; 8998d6b6d7eSAbhishek Sahu 9008d6b6d7eSAbhishek Sahu if (flags & NAND_BAM_NWD) { 9018d6b6d7eSAbhishek Sahu ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, 9028d6b6d7eSAbhishek Sahu DMA_PREP_FENCE | 9038d6b6d7eSAbhishek Sahu DMA_PREP_CMD); 9048d6b6d7eSAbhishek Sahu if (ret) 9058d6b6d7eSAbhishek Sahu return ret; 
9068d6b6d7eSAbhishek Sahu } 9078d6b6d7eSAbhishek Sahu } 9088d6b6d7eSAbhishek Sahu 9098d6b6d7eSAbhishek Sahu return 0; 9108d6b6d7eSAbhishek Sahu } 9118d6b6d7eSAbhishek Sahu 9128d6b6d7eSAbhishek Sahu /* 9134e2f6c52SAbhishek Sahu * Prepares the data descriptor for BAM DMA which will be used for NAND 9144e2f6c52SAbhishek Sahu * data reads and writes. 9154e2f6c52SAbhishek Sahu */ 9164e2f6c52SAbhishek Sahu static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, 9174e2f6c52SAbhishek Sahu const void *vaddr, 9184e2f6c52SAbhishek Sahu int size, unsigned int flags) 9194e2f6c52SAbhishek Sahu { 9204e2f6c52SAbhishek Sahu int ret; 9214e2f6c52SAbhishek Sahu struct bam_transaction *bam_txn = nandc->bam_txn; 9224e2f6c52SAbhishek Sahu 9234e2f6c52SAbhishek Sahu if (read) { 9244e2f6c52SAbhishek Sahu sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], 9254e2f6c52SAbhishek Sahu vaddr, size); 9264e2f6c52SAbhishek Sahu bam_txn->rx_sgl_pos++; 9274e2f6c52SAbhishek Sahu } else { 9284e2f6c52SAbhishek Sahu sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], 9294e2f6c52SAbhishek Sahu vaddr, size); 9304e2f6c52SAbhishek Sahu bam_txn->tx_sgl_pos++; 9314e2f6c52SAbhishek Sahu 9324e2f6c52SAbhishek Sahu /* 9334e2f6c52SAbhishek Sahu * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag 9344e2f6c52SAbhishek Sahu * is not set, form the DMA descriptor 9354e2f6c52SAbhishek Sahu */ 9364e2f6c52SAbhishek Sahu if (!(flags & NAND_BAM_NO_EOT)) { 9374e2f6c52SAbhishek Sahu ret = prepare_bam_async_desc(nandc, nandc->tx_chan, 9384e2f6c52SAbhishek Sahu DMA_PREP_INTERRUPT); 9394e2f6c52SAbhishek Sahu if (ret) 9404e2f6c52SAbhishek Sahu return ret; 9414e2f6c52SAbhishek Sahu } 9424e2f6c52SAbhishek Sahu } 9434e2f6c52SAbhishek Sahu 9444e2f6c52SAbhishek Sahu return 0; 9454e2f6c52SAbhishek Sahu } 9464e2f6c52SAbhishek Sahu 947381dd245SAbhishek Sahu static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, 948c76b78d8SArchit Taneja int reg_off, const void *vaddr, int size, 949c76b78d8SArchit Taneja bool flow_control) 950c76b78d8SArchit Taneja { 951c76b78d8SArchit Taneja struct desc_info *desc; 952c76b78d8SArchit Taneja struct dma_async_tx_descriptor *dma_desc; 953c76b78d8SArchit Taneja struct scatterlist *sgl; 954c76b78d8SArchit Taneja struct dma_slave_config slave_conf; 955c76b78d8SArchit Taneja enum dma_transfer_direction dir_eng; 956c76b78d8SArchit Taneja int ret; 957c76b78d8SArchit Taneja 958c76b78d8SArchit Taneja desc = kzalloc(sizeof(*desc), GFP_KERNEL); 959c76b78d8SArchit Taneja if (!desc) 960c76b78d8SArchit Taneja return -ENOMEM; 961c76b78d8SArchit Taneja 962381dd245SAbhishek Sahu sgl = &desc->adm_sgl; 963c76b78d8SArchit Taneja 964c76b78d8SArchit Taneja sg_init_one(sgl, vaddr, size); 965c76b78d8SArchit Taneja 966c76b78d8SArchit Taneja if (read) { 967c76b78d8SArchit Taneja dir_eng = DMA_DEV_TO_MEM; 968c76b78d8SArchit Taneja desc->dir = DMA_FROM_DEVICE; 969c76b78d8SArchit Taneja } else { 970c76b78d8SArchit Taneja dir_eng = DMA_MEM_TO_DEV; 971c76b78d8SArchit Taneja desc->dir = DMA_TO_DEVICE; 972c76b78d8SArchit Taneja } 973c76b78d8SArchit Taneja 974c76b78d8SArchit Taneja ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir); 975c76b78d8SArchit Taneja if (ret == 0) { 976c76b78d8SArchit Taneja ret = -ENOMEM; 977c76b78d8SArchit Taneja goto err; 978c76b78d8SArchit Taneja } 979c76b78d8SArchit Taneja 980c76b78d8SArchit Taneja memset(&slave_conf, 0x00, sizeof(slave_conf)); 981c76b78d8SArchit Taneja 982c76b78d8SArchit Taneja slave_conf.device_fc = flow_control; 983c76b78d8SArchit Taneja if (read) { 
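/* ADM flow control: reads from the controller go through the data CRCI, while the write path below uses the command CRCI */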
984c76b78d8SArchit Taneja slave_conf.src_maxburst = 16; 985c76b78d8SArchit Taneja slave_conf.src_addr = nandc->base_dma + reg_off; 986c76b78d8SArchit Taneja slave_conf.slave_id = nandc->data_crci; 987c76b78d8SArchit Taneja } else { 988c76b78d8SArchit Taneja slave_conf.dst_maxburst = 16; 989c76b78d8SArchit Taneja slave_conf.dst_addr = nandc->base_dma + reg_off; 990c76b78d8SArchit Taneja slave_conf.slave_id = nandc->cmd_crci; 991c76b78d8SArchit Taneja } 992c76b78d8SArchit Taneja 993c76b78d8SArchit Taneja ret = dmaengine_slave_config(nandc->chan, &slave_conf); 994c76b78d8SArchit Taneja if (ret) { 995c76b78d8SArchit Taneja dev_err(nandc->dev, "failed to configure dma channel\n"); 996c76b78d8SArchit Taneja goto err; 997c76b78d8SArchit Taneja } 998c76b78d8SArchit Taneja 999c76b78d8SArchit Taneja dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0); 1000c76b78d8SArchit Taneja if (!dma_desc) { 1001c76b78d8SArchit Taneja dev_err(nandc->dev, "failed to prepare desc\n"); 1002c76b78d8SArchit Taneja ret = -EINVAL; 1003c76b78d8SArchit Taneja goto err; 1004c76b78d8SArchit Taneja } 1005c76b78d8SArchit Taneja 1006c76b78d8SArchit Taneja desc->dma_desc = dma_desc; 1007c76b78d8SArchit Taneja 1008c76b78d8SArchit Taneja list_add_tail(&desc->node, &nandc->desc_list); 1009c76b78d8SArchit Taneja 1010c76b78d8SArchit Taneja return 0; 1011c76b78d8SArchit Taneja err: 1012c76b78d8SArchit Taneja kfree(desc); 1013c76b78d8SArchit Taneja 1014c76b78d8SArchit Taneja return ret; 1015c76b78d8SArchit Taneja } 1016c76b78d8SArchit Taneja 1017c76b78d8SArchit Taneja /* 1018c76b78d8SArchit Taneja * read_reg_dma: prepares a descriptor to read a given number of 1019c76b78d8SArchit Taneja * contiguous registers to the reg_read_buf pointer 1020c76b78d8SArchit Taneja * 1021c76b78d8SArchit Taneja * @first: offset of the first register in the contiguous block 1022c76b78d8SArchit Taneja * @num_regs: number of registers to read 102367e830aeSAbhishek Sahu * @flags: flags to control DMA descriptor preparation 1024c76b78d8SArchit Taneja */ 1025c76b78d8SArchit Taneja static int read_reg_dma(struct qcom_nand_controller *nandc, int first, 102667e830aeSAbhishek Sahu int num_regs, unsigned int flags) 1027c76b78d8SArchit Taneja { 1028c76b78d8SArchit Taneja bool flow_control = false; 1029c76b78d8SArchit Taneja void *vaddr; 1030c76b78d8SArchit Taneja 10318d6b6d7eSAbhishek Sahu vaddr = nandc->reg_read_buf + nandc->reg_read_pos; 10328d6b6d7eSAbhishek Sahu nandc->reg_read_pos += num_regs; 1033c76b78d8SArchit Taneja 1034cc409b9aSAbhishek Sahu if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) 1035cc409b9aSAbhishek Sahu first = dev_cmd_reg_addr(nandc, first); 1036cc409b9aSAbhishek Sahu 10378d6b6d7eSAbhishek Sahu if (nandc->props->is_bam) 10388d6b6d7eSAbhishek Sahu return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, 10398d6b6d7eSAbhishek Sahu num_regs, flags); 1040c76b78d8SArchit Taneja 10418d6b6d7eSAbhishek Sahu if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) 10428d6b6d7eSAbhishek Sahu flow_control = true; 10438d6b6d7eSAbhishek Sahu 10448d6b6d7eSAbhishek Sahu return prep_adm_dma_desc(nandc, true, first, vaddr, 10458d6b6d7eSAbhishek Sahu num_regs * sizeof(u32), flow_control); 1046c76b78d8SArchit Taneja } 1047c76b78d8SArchit Taneja 1048c76b78d8SArchit Taneja /* 1049c76b78d8SArchit Taneja * write_reg_dma: prepares a descriptor to write a given number of 1050c76b78d8SArchit Taneja * contiguous registers 1051c76b78d8SArchit Taneja * 1052c76b78d8SArchit Taneja * @first: offset of the first register in the contiguous block 
1053c76b78d8SArchit Taneja * @num_regs: number of registers to write 105467e830aeSAbhishek Sahu * @flags: flags to control DMA descriptor preparation 1055c76b78d8SArchit Taneja */ 1056c76b78d8SArchit Taneja static int write_reg_dma(struct qcom_nand_controller *nandc, int first, 105767e830aeSAbhishek Sahu int num_regs, unsigned int flags) 1058c76b78d8SArchit Taneja { 1059c76b78d8SArchit Taneja bool flow_control = false; 1060c76b78d8SArchit Taneja struct nandc_regs *regs = nandc->regs; 1061c76b78d8SArchit Taneja void *vaddr; 1062c76b78d8SArchit Taneja 1063c76b78d8SArchit Taneja vaddr = offset_to_nandc_reg(regs, first); 1064c76b78d8SArchit Taneja 1065a86b9c4fSAbhishek Sahu if (first == NAND_ERASED_CW_DETECT_CFG) { 1066a86b9c4fSAbhishek Sahu if (flags & NAND_ERASED_CW_SET) 1067a86b9c4fSAbhishek Sahu vaddr = ®s->erased_cw_detect_cfg_set; 1068a86b9c4fSAbhishek Sahu else 1069a86b9c4fSAbhishek Sahu vaddr = ®s->erased_cw_detect_cfg_clr; 1070a86b9c4fSAbhishek Sahu } 1071a86b9c4fSAbhishek Sahu 107267e830aeSAbhishek Sahu if (first == NAND_EXEC_CMD) 107367e830aeSAbhishek Sahu flags |= NAND_BAM_NWD; 107467e830aeSAbhishek Sahu 1075cc409b9aSAbhishek Sahu if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1) 1076cc409b9aSAbhishek Sahu first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1); 1077c76b78d8SArchit Taneja 1078cc409b9aSAbhishek Sahu if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) 1079cc409b9aSAbhishek Sahu first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); 1080c76b78d8SArchit Taneja 10818d6b6d7eSAbhishek Sahu if (nandc->props->is_bam) 10828d6b6d7eSAbhishek Sahu return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, 10838d6b6d7eSAbhishek Sahu num_regs, flags); 1084c76b78d8SArchit Taneja 10858d6b6d7eSAbhishek Sahu if (first == NAND_FLASH_CMD) 10868d6b6d7eSAbhishek Sahu flow_control = true; 10878d6b6d7eSAbhishek Sahu 10888d6b6d7eSAbhishek Sahu return prep_adm_dma_desc(nandc, false, first, vaddr, 10898d6b6d7eSAbhishek Sahu num_regs * sizeof(u32), flow_control); 1090c76b78d8SArchit Taneja } 1091c76b78d8SArchit Taneja 1092c76b78d8SArchit Taneja /* 1093c76b78d8SArchit Taneja * read_data_dma: prepares a DMA descriptor to transfer data from the 1094c76b78d8SArchit Taneja * controller's internal buffer to the buffer 'vaddr' 1095c76b78d8SArchit Taneja * 1096c76b78d8SArchit Taneja * @reg_off: offset within the controller's data buffer 1097c76b78d8SArchit Taneja * @vaddr: virtual address of the buffer we want to write to 1098c76b78d8SArchit Taneja * @size: DMA transaction size in bytes 109967e830aeSAbhishek Sahu * @flags: flags to control DMA descriptor preparation 1100c76b78d8SArchit Taneja */ 1101c76b78d8SArchit Taneja static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, 110267e830aeSAbhishek Sahu const u8 *vaddr, int size, unsigned int flags) 1103c76b78d8SArchit Taneja { 11044e2f6c52SAbhishek Sahu if (nandc->props->is_bam) 11054e2f6c52SAbhishek Sahu return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); 11064e2f6c52SAbhishek Sahu 1107381dd245SAbhishek Sahu return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); 1108c76b78d8SArchit Taneja } 1109c76b78d8SArchit Taneja 1110c76b78d8SArchit Taneja /* 1111c76b78d8SArchit Taneja * write_data_dma: prepares a DMA descriptor to transfer data from 1112c76b78d8SArchit Taneja * 'vaddr' to the controller's internal buffer 1113c76b78d8SArchit Taneja * 1114c76b78d8SArchit Taneja * @reg_off: offset within the controller's data buffer 1115c76b78d8SArchit Taneja * @vaddr: virtual address of the buffer we want to 
read from 1116c76b78d8SArchit Taneja * @size: DMA transaction size in bytes 111767e830aeSAbhishek Sahu * @flags: flags to control DMA descriptor preparation 1118c76b78d8SArchit Taneja */ 1119c76b78d8SArchit Taneja static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off, 112067e830aeSAbhishek Sahu const u8 *vaddr, int size, unsigned int flags) 1121c76b78d8SArchit Taneja { 11224e2f6c52SAbhishek Sahu if (nandc->props->is_bam) 11234e2f6c52SAbhishek Sahu return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); 11244e2f6c52SAbhishek Sahu 1125381dd245SAbhishek Sahu return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); 1126c76b78d8SArchit Taneja } 1127c76b78d8SArchit Taneja 1128c76b78d8SArchit Taneja /* 1129bde4330aSAbhishek Sahu * Helper to prepare DMA descriptors for configuring registers 1130bde4330aSAbhishek Sahu * before reading a NAND page. 1131c76b78d8SArchit Taneja */ 11329a7c39e2SMd Sadre Alam static void config_nand_page_read(struct nand_chip *chip) 1133c76b78d8SArchit Taneja { 11349a7c39e2SMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 11359a7c39e2SMd Sadre Alam 113667e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_ADDR0, 2, 0); 113767e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); 1138*bfb34eceSMd Sadre Alam if (!nandc->props->qpic_v2) 113967e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); 1140a86b9c4fSAbhishek Sahu write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0); 1141a86b9c4fSAbhishek Sahu write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 1142a86b9c4fSAbhishek Sahu NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); 1143bde4330aSAbhishek Sahu } 1144c76b78d8SArchit Taneja 1145bde4330aSAbhishek Sahu /* 1146bde4330aSAbhishek Sahu * Helper to prepare DMA descriptors for configuring registers 1147bde4330aSAbhishek Sahu * before reading each codeword in NAND page. 
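 *
 * For BAM, the four NAND_READ_LOCATION_* registers (their LAST_CW variants
 * on QPIC v2 for the last codeword) are programmed first, then NAND_FLASH_CMD
 * and NAND_EXEC_CMD are issued. With ECC enabled, NAND_FLASH_STATUS,
 * NAND_BUFFER_STATUS and NAND_ERASED_CW_DETECT_STATUS are read back per
 * codeword; without ECC only NAND_FLASH_STATUS is read.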
1148bde4330aSAbhishek Sahu */ 11495bc36b2bSAbhishek Sahu static void 1150503ee5aaSMd Sadre Alam config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw) 1151bde4330aSAbhishek Sahu { 11529a7c39e2SMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1153503ee5aaSMd Sadre Alam struct nand_ecc_ctrl *ecc = &chip->ecc; 1154503ee5aaSMd Sadre Alam 1155503ee5aaSMd Sadre Alam int reg = NAND_READ_LOCATION_0; 1156503ee5aaSMd Sadre Alam 1157503ee5aaSMd Sadre Alam if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw)) 1158503ee5aaSMd Sadre Alam reg = NAND_READ_LOCATION_LAST_CW_0; 11599a7c39e2SMd Sadre Alam 116091af95c1SAbhishek Sahu if (nandc->props->is_bam) 1161503ee5aaSMd Sadre Alam write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL); 116291af95c1SAbhishek Sahu 116367e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 116467e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 1165c76b78d8SArchit Taneja 11665bc36b2bSAbhishek Sahu if (use_ecc) { 116767e830aeSAbhishek Sahu read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); 116867e830aeSAbhishek Sahu read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1, 116967e830aeSAbhishek Sahu NAND_BAM_NEXT_SGL); 11705bc36b2bSAbhishek Sahu } else { 11715bc36b2bSAbhishek Sahu read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); 11725bc36b2bSAbhishek Sahu } 1173c76b78d8SArchit Taneja } 1174c76b78d8SArchit Taneja 1175c76b78d8SArchit Taneja /* 1176bde4330aSAbhishek Sahu * Helper to prepare dma descriptors to configure registers needed for reading a 1177bde4330aSAbhishek Sahu * single codeword in page 1178c76b78d8SArchit Taneja */ 11795bc36b2bSAbhishek Sahu static void 11809a7c39e2SMd Sadre Alam config_nand_single_cw_page_read(struct nand_chip *chip, 1181503ee5aaSMd Sadre Alam bool use_ecc, int cw) 1182bde4330aSAbhishek Sahu { 11839a7c39e2SMd Sadre Alam config_nand_page_read(chip); 1184503ee5aaSMd Sadre Alam config_nand_cw_read(chip, use_ecc, cw); 1185bde4330aSAbhishek Sahu } 1186bde4330aSAbhishek Sahu 118777cc5364SAbhishek Sahu /* 118877cc5364SAbhishek Sahu * Helper to prepare DMA descriptors used to configure registers needed for 118977cc5364SAbhishek Sahu * before writing a NAND page. 119077cc5364SAbhishek Sahu */ 11919a7c39e2SMd Sadre Alam static void config_nand_page_write(struct nand_chip *chip) 1192c76b78d8SArchit Taneja { 11939a7c39e2SMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 11949a7c39e2SMd Sadre Alam 119567e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_ADDR0, 2, 0); 119667e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); 1197*bfb34eceSMd Sadre Alam if (!nandc->props->qpic_v2) 119867e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 119967e830aeSAbhishek Sahu NAND_BAM_NEXT_SGL); 1200c76b78d8SArchit Taneja } 1201c76b78d8SArchit Taneja 120277cc5364SAbhishek Sahu /* 120377cc5364SAbhishek Sahu * Helper to prepare DMA descriptors for configuring registers 120477cc5364SAbhishek Sahu * before writing each codeword in NAND page. 
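 *
 * NAND_FLASH_CMD and NAND_EXEC_CMD start the codeword write, NAND_FLASH_STATUS
 * is read back, and the stored clear values for NAND_FLASH_STATUS and
 * NAND_READ_STATUS are then written so the status registers are ready for the
 * next codeword.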
120577cc5364SAbhishek Sahu */ 12069a7c39e2SMd Sadre Alam static void config_nand_cw_write(struct nand_chip *chip) 1207c76b78d8SArchit Taneja { 12089a7c39e2SMd Sadre Alam struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 12099a7c39e2SMd Sadre Alam 121067e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 121167e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 1212c76b78d8SArchit Taneja 121367e830aeSAbhishek Sahu read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); 1214c76b78d8SArchit Taneja 121567e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); 121667e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); 1217c76b78d8SArchit Taneja } 1218c76b78d8SArchit Taneja 1219c76b78d8SArchit Taneja /* 1220bf6065c6SBoris Brezillon * the following functions are used within chip->legacy.cmdfunc() to 1221bf6065c6SBoris Brezillon * perform different NAND_CMD_* commands 1222c76b78d8SArchit Taneja */ 1223c76b78d8SArchit Taneja 1224c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_PARAM */ 1225c76b78d8SArchit Taneja static int nandc_param(struct qcom_nand_host *host) 1226c76b78d8SArchit Taneja { 1227c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1228c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1229c76b78d8SArchit Taneja 1230c76b78d8SArchit Taneja /* 1231c76b78d8SArchit Taneja * NAND_CMD_PARAM is called before we know much about the FLASH chip 1232c76b78d8SArchit Taneja * in use. we configure the controller to perform a raw read of 512 1233c76b78d8SArchit Taneja * bytes to read onfi params 1234c76b78d8SArchit Taneja */ 1235b1209582SManivannan Sadhasivam if (nandc->props->qpic_v2) 12369a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ | 1237b1209582SManivannan Sadhasivam PAGE_ACC | LAST_PAGE); 1238b1209582SManivannan Sadhasivam else 12399a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ | 1240b1209582SManivannan Sadhasivam PAGE_ACC | LAST_PAGE); 1241b1209582SManivannan Sadhasivam 12429a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR0, 0); 12439a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR1, 0); 12449a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE 1245c76b78d8SArchit Taneja | 512 << UD_SIZE_BYTES 1246c76b78d8SArchit Taneja | 5 << NUM_ADDR_CYCLES 1247c76b78d8SArchit Taneja | 0 << SPARE_SIZE_BYTES); 12489a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES 1249c76b78d8SArchit Taneja | 0 << CS_ACTIVE_BSY 1250c76b78d8SArchit Taneja | 17 << BAD_BLOCK_BYTE_NUM 1251c76b78d8SArchit Taneja | 1 << BAD_BLOCK_IN_SPARE_AREA 1252c76b78d8SArchit Taneja | 2 << WR_RD_BSY_GAP 1253c76b78d8SArchit Taneja | 0 << WIDE_FLASH 1254c76b78d8SArchit Taneja | 1 << DEV0_CFG1_ECC_DISABLE); 1255*bfb34eceSMd Sadre Alam if (!nandc->props->qpic_v2) 12569a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); 1257c76b78d8SArchit Taneja 1258b1209582SManivannan Sadhasivam /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ 1259b1209582SManivannan Sadhasivam if (!nandc->props->qpic_v2) { 12609a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV_CMD_VLD, 1261d8a9b320SAbhishek Sahu (nandc->vld & ~READ_START_VLD)); 12629a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV_CMD1, 1263c76b78d8SArchit Taneja (nandc->cmd1 & ~(0xFF << READ_ADDR)) 1264c76b78d8SArchit Taneja | NAND_CMD_PARAM << 
READ_ADDR); 1265b1209582SManivannan Sadhasivam } 1266c76b78d8SArchit Taneja 12679a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EXEC_CMD, 1); 1268c76b78d8SArchit Taneja 1269b1209582SManivannan Sadhasivam if (!nandc->props->qpic_v2) { 12709a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1); 12719a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); 1272b1209582SManivannan Sadhasivam } 1273b1209582SManivannan Sadhasivam 1274e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, 0, 0, 0, 512, 1); 1275c76b78d8SArchit Taneja 1276b1209582SManivannan Sadhasivam if (!nandc->props->qpic_v2) { 127767e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); 127867e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); 1279b1209582SManivannan Sadhasivam } 1280c76b78d8SArchit Taneja 1281c76b78d8SArchit Taneja nandc->buf_count = 512; 1282c76b78d8SArchit Taneja memset(nandc->data_buffer, 0xff, nandc->buf_count); 1283c76b78d8SArchit Taneja 1284503ee5aaSMd Sadre Alam config_nand_single_cw_page_read(chip, false, 0); 1285c76b78d8SArchit Taneja 1286c76b78d8SArchit Taneja read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, 128767e830aeSAbhishek Sahu nandc->buf_count, 0); 1288c76b78d8SArchit Taneja 1289c76b78d8SArchit Taneja /* restore CMD1 and VLD regs */ 1290b1209582SManivannan Sadhasivam if (!nandc->props->qpic_v2) { 129167e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); 129267e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); 1293b1209582SManivannan Sadhasivam } 1294c76b78d8SArchit Taneja 1295c76b78d8SArchit Taneja return 0; 1296c76b78d8SArchit Taneja } 1297c76b78d8SArchit Taneja 1298c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_ERASE1 */ 1299c76b78d8SArchit Taneja static int erase_block(struct qcom_nand_host *host, int page_addr) 1300c76b78d8SArchit Taneja { 1301c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1302c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1303c76b78d8SArchit Taneja 13049a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CMD, 130533bf5519SOlof Johansson OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE); 13069a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR0, page_addr); 13079a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR1, 0); 13089a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_CFG0, 1309c76b78d8SArchit Taneja host->cfg0_raw & ~(7 << CW_PER_PAGE)); 13109a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw); 13119a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EXEC_CMD, 1); 13129a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus); 13139a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus); 1314c76b78d8SArchit Taneja 131567e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); 131667e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); 131767e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 1318c76b78d8SArchit Taneja 131967e830aeSAbhishek Sahu read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); 1320c76b78d8SArchit Taneja 132167e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); 132267e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); 1323c76b78d8SArchit Taneja 1324c76b78d8SArchit Taneja return 0; 1325c76b78d8SArchit Taneja } 1326c76b78d8SArchit Taneja 
1327c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_READID */ 1328c76b78d8SArchit Taneja static int read_id(struct qcom_nand_host *host, int column) 1329c76b78d8SArchit Taneja { 1330c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1331c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1332c76b78d8SArchit Taneja 1333c76b78d8SArchit Taneja if (column == -1) 1334c76b78d8SArchit Taneja return 0; 1335c76b78d8SArchit Taneja 13369a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID); 13379a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR0, column); 13389a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_ADDR1, 0); 13399a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT, 13409d43f915SAbhishek Sahu nandc->props->is_bam ? 0 : DM_EN); 13419a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EXEC_CMD, 1); 1342c76b78d8SArchit Taneja 134367e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); 134467e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 1345c76b78d8SArchit Taneja 134667e830aeSAbhishek Sahu read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); 1347c76b78d8SArchit Taneja 1348c76b78d8SArchit Taneja return 0; 1349c76b78d8SArchit Taneja } 1350c76b78d8SArchit Taneja 1351c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_RESET */ 1352c76b78d8SArchit Taneja static int reset(struct qcom_nand_host *host) 1353c76b78d8SArchit Taneja { 1354c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1355c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1356c76b78d8SArchit Taneja 13579a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE); 13589a7c39e2SMd Sadre Alam nandc_set_reg(chip, NAND_EXEC_CMD, 1); 1359c76b78d8SArchit Taneja 136067e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 136167e830aeSAbhishek Sahu write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 1362c76b78d8SArchit Taneja 136367e830aeSAbhishek Sahu read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); 1364c76b78d8SArchit Taneja 1365c76b78d8SArchit Taneja return 0; 1366c76b78d8SArchit Taneja } 1367c76b78d8SArchit Taneja 1368c76b78d8SArchit Taneja /* helpers to submit/free our list of dma descriptors */ 1369c76b78d8SArchit Taneja static int submit_descs(struct qcom_nand_controller *nandc) 1370c76b78d8SArchit Taneja { 1371c76b78d8SArchit Taneja struct desc_info *desc; 1372c76b78d8SArchit Taneja dma_cookie_t cookie = 0; 1373381dd245SAbhishek Sahu struct bam_transaction *bam_txn = nandc->bam_txn; 1374381dd245SAbhishek Sahu int r; 1375381dd245SAbhishek Sahu 1376381dd245SAbhishek Sahu if (nandc->props->is_bam) { 1377381dd245SAbhishek Sahu if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { 1378381dd245SAbhishek Sahu r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0); 1379381dd245SAbhishek Sahu if (r) 1380381dd245SAbhishek Sahu return r; 1381381dd245SAbhishek Sahu } 1382381dd245SAbhishek Sahu 1383381dd245SAbhishek Sahu if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) { 1384381dd245SAbhishek Sahu r = prepare_bam_async_desc(nandc, nandc->tx_chan, 1385381dd245SAbhishek Sahu DMA_PREP_INTERRUPT); 1386381dd245SAbhishek Sahu if (r) 1387381dd245SAbhishek Sahu return r; 1388381dd245SAbhishek Sahu } 1389381dd245SAbhishek Sahu 1390381dd245SAbhishek Sahu if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { 13918d6b6d7eSAbhishek Sahu r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 
13928d6b6d7eSAbhishek Sahu DMA_PREP_CMD); 1393381dd245SAbhishek Sahu if (r) 1394381dd245SAbhishek Sahu return r; 1395381dd245SAbhishek Sahu } 1396381dd245SAbhishek Sahu } 1397c76b78d8SArchit Taneja 1398c76b78d8SArchit Taneja list_for_each_entry(desc, &nandc->desc_list, node) 1399c76b78d8SArchit Taneja cookie = dmaengine_submit(desc->dma_desc); 1400c76b78d8SArchit Taneja 1401381dd245SAbhishek Sahu if (nandc->props->is_bam) { 14026f20070dSAbhishek Sahu bam_txn->last_cmd_desc->callback = qpic_bam_dma_done; 14036f20070dSAbhishek Sahu bam_txn->last_cmd_desc->callback_param = bam_txn; 14046f20070dSAbhishek Sahu if (bam_txn->last_data_desc) { 14056f20070dSAbhishek Sahu bam_txn->last_data_desc->callback = qpic_bam_dma_done; 14066f20070dSAbhishek Sahu bam_txn->last_data_desc->callback_param = bam_txn; 14076f20070dSAbhishek Sahu bam_txn->wait_second_completion = true; 14086f20070dSAbhishek Sahu } 14096f20070dSAbhishek Sahu 1410381dd245SAbhishek Sahu dma_async_issue_pending(nandc->tx_chan); 1411381dd245SAbhishek Sahu dma_async_issue_pending(nandc->rx_chan); 14126f20070dSAbhishek Sahu dma_async_issue_pending(nandc->cmd_chan); 1413381dd245SAbhishek Sahu 14146f20070dSAbhishek Sahu if (!wait_for_completion_timeout(&bam_txn->txn_done, 14156f20070dSAbhishek Sahu QPIC_NAND_COMPLETION_TIMEOUT)) 1416381dd245SAbhishek Sahu return -ETIMEDOUT; 1417381dd245SAbhishek Sahu } else { 1418c76b78d8SArchit Taneja if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) 1419c76b78d8SArchit Taneja return -ETIMEDOUT; 1420381dd245SAbhishek Sahu } 1421c76b78d8SArchit Taneja 1422c76b78d8SArchit Taneja return 0; 1423c76b78d8SArchit Taneja } 1424c76b78d8SArchit Taneja 1425c76b78d8SArchit Taneja static void free_descs(struct qcom_nand_controller *nandc) 1426c76b78d8SArchit Taneja { 1427c76b78d8SArchit Taneja struct desc_info *desc, *n; 1428c76b78d8SArchit Taneja 1429c76b78d8SArchit Taneja list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { 1430c76b78d8SArchit Taneja list_del(&desc->node); 1431381dd245SAbhishek Sahu 1432381dd245SAbhishek Sahu if (nandc->props->is_bam) 1433381dd245SAbhishek Sahu dma_unmap_sg(nandc->dev, desc->bam_sgl, 1434381dd245SAbhishek Sahu desc->sgl_cnt, desc->dir); 1435381dd245SAbhishek Sahu else 1436381dd245SAbhishek Sahu dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1, 1437381dd245SAbhishek Sahu desc->dir); 1438381dd245SAbhishek Sahu 1439c76b78d8SArchit Taneja kfree(desc); 1440c76b78d8SArchit Taneja } 1441c76b78d8SArchit Taneja } 1442c76b78d8SArchit Taneja 1443c76b78d8SArchit Taneja /* reset the register read buffer for next NAND operation */ 1444c76b78d8SArchit Taneja static void clear_read_regs(struct qcom_nand_controller *nandc) 1445c76b78d8SArchit Taneja { 1446c76b78d8SArchit Taneja nandc->reg_read_pos = 0; 14476192ff7aSAbhishek Sahu nandc_read_buffer_sync(nandc, false); 1448c76b78d8SArchit Taneja } 1449c76b78d8SArchit Taneja 1450c76b78d8SArchit Taneja static void pre_command(struct qcom_nand_host *host, int command) 1451c76b78d8SArchit Taneja { 1452c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1453c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1454c76b78d8SArchit Taneja 1455c76b78d8SArchit Taneja nandc->buf_count = 0; 1456c76b78d8SArchit Taneja nandc->buf_start = 0; 1457c76b78d8SArchit Taneja host->use_ecc = false; 1458c76b78d8SArchit Taneja host->last_command = command; 1459c76b78d8SArchit Taneja 1460c76b78d8SArchit Taneja clear_read_regs(nandc); 14614e2f6c52SAbhishek Sahu 14624e2f6c52SAbhishek Sahu if (command == NAND_CMD_RESET || 
command == NAND_CMD_READID || 14634e2f6c52SAbhishek Sahu command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1) 14644e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 1465c76b78d8SArchit Taneja } 1466c76b78d8SArchit Taneja 1467c76b78d8SArchit Taneja /* 1468c76b78d8SArchit Taneja * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our 1469c76b78d8SArchit Taneja * privately maintained status byte, this status byte can be read after 1470c76b78d8SArchit Taneja * NAND_CMD_STATUS is called 1471c76b78d8SArchit Taneja */ 1472c76b78d8SArchit Taneja static void parse_erase_write_errors(struct qcom_nand_host *host, int command) 1473c76b78d8SArchit Taneja { 1474c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1475c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1476c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 1477c76b78d8SArchit Taneja int num_cw; 1478c76b78d8SArchit Taneja int i; 1479c76b78d8SArchit Taneja 1480c76b78d8SArchit Taneja num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1; 14816192ff7aSAbhishek Sahu nandc_read_buffer_sync(nandc, true); 1482c76b78d8SArchit Taneja 1483c76b78d8SArchit Taneja for (i = 0; i < num_cw; i++) { 1484c76b78d8SArchit Taneja u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]); 1485c76b78d8SArchit Taneja 1486c76b78d8SArchit Taneja if (flash_status & FS_MPU_ERR) 1487c76b78d8SArchit Taneja host->status &= ~NAND_STATUS_WP; 1488c76b78d8SArchit Taneja 1489c76b78d8SArchit Taneja if (flash_status & FS_OP_ERR || (i == (num_cw - 1) && 1490c76b78d8SArchit Taneja (flash_status & 1491c76b78d8SArchit Taneja FS_DEVICE_STS_ERR))) 1492c76b78d8SArchit Taneja host->status |= NAND_STATUS_FAIL; 1493c76b78d8SArchit Taneja } 1494c76b78d8SArchit Taneja } 1495c76b78d8SArchit Taneja 1496c76b78d8SArchit Taneja static void post_command(struct qcom_nand_host *host, int command) 1497c76b78d8SArchit Taneja { 1498c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1499c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1500c76b78d8SArchit Taneja 1501c76b78d8SArchit Taneja switch (command) { 1502c76b78d8SArchit Taneja case NAND_CMD_READID: 15036192ff7aSAbhishek Sahu nandc_read_buffer_sync(nandc, true); 1504c76b78d8SArchit Taneja memcpy(nandc->data_buffer, nandc->reg_read_buf, 1505c76b78d8SArchit Taneja nandc->buf_count); 1506c76b78d8SArchit Taneja break; 1507c76b78d8SArchit Taneja case NAND_CMD_PAGEPROG: 1508c76b78d8SArchit Taneja case NAND_CMD_ERASE1: 1509c76b78d8SArchit Taneja parse_erase_write_errors(host, command); 1510c76b78d8SArchit Taneja break; 1511c76b78d8SArchit Taneja default: 1512c76b78d8SArchit Taneja break; 1513c76b78d8SArchit Taneja } 1514c76b78d8SArchit Taneja } 1515c76b78d8SArchit Taneja 1516c76b78d8SArchit Taneja /* 1517bf6065c6SBoris Brezillon * Implements chip->legacy.cmdfunc. It's only used for a limited set of 1518bf6065c6SBoris Brezillon * commands. The rest of the commands wouldn't be called by upper layers. 1519bf6065c6SBoris Brezillon * For example, NAND_CMD_READOOB would never be called because we have our own 1520bf6065c6SBoris Brezillon * versions of read_oob ops for nand_ecc_ctrl. 
1521c76b78d8SArchit Taneja */ 15225295cf2eSBoris Brezillon static void qcom_nandc_command(struct nand_chip *chip, unsigned int command, 1523c76b78d8SArchit Taneja int column, int page_addr) 1524c76b78d8SArchit Taneja { 1525c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 1526c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 1527c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1528c76b78d8SArchit Taneja bool wait = false; 1529c76b78d8SArchit Taneja int ret = 0; 1530c76b78d8SArchit Taneja 1531c76b78d8SArchit Taneja pre_command(host, command); 1532c76b78d8SArchit Taneja 1533c76b78d8SArchit Taneja switch (command) { 1534c76b78d8SArchit Taneja case NAND_CMD_RESET: 1535c76b78d8SArchit Taneja ret = reset(host); 1536c76b78d8SArchit Taneja wait = true; 1537c76b78d8SArchit Taneja break; 1538c76b78d8SArchit Taneja 1539c76b78d8SArchit Taneja case NAND_CMD_READID: 1540c76b78d8SArchit Taneja nandc->buf_count = 4; 1541c76b78d8SArchit Taneja ret = read_id(host, column); 1542c76b78d8SArchit Taneja wait = true; 1543c76b78d8SArchit Taneja break; 1544c76b78d8SArchit Taneja 1545c76b78d8SArchit Taneja case NAND_CMD_PARAM: 1546c76b78d8SArchit Taneja ret = nandc_param(host); 1547c76b78d8SArchit Taneja wait = true; 1548c76b78d8SArchit Taneja break; 1549c76b78d8SArchit Taneja 1550c76b78d8SArchit Taneja case NAND_CMD_ERASE1: 1551c76b78d8SArchit Taneja ret = erase_block(host, page_addr); 1552c76b78d8SArchit Taneja wait = true; 1553c76b78d8SArchit Taneja break; 1554c76b78d8SArchit Taneja 1555c76b78d8SArchit Taneja case NAND_CMD_READ0: 1556c76b78d8SArchit Taneja /* we read the entire page for now */ 1557c76b78d8SArchit Taneja WARN_ON(column != 0); 1558c76b78d8SArchit Taneja 1559c76b78d8SArchit Taneja host->use_ecc = true; 1560c76b78d8SArchit Taneja set_address(host, 0, page_addr); 1561503ee5aaSMd Sadre Alam update_rw_regs(host, ecc->steps, true, 0); 1562c76b78d8SArchit Taneja break; 1563c76b78d8SArchit Taneja 1564c76b78d8SArchit Taneja case NAND_CMD_SEQIN: 1565c76b78d8SArchit Taneja WARN_ON(column != 0); 1566c76b78d8SArchit Taneja set_address(host, 0, page_addr); 1567c76b78d8SArchit Taneja break; 1568c76b78d8SArchit Taneja 1569c76b78d8SArchit Taneja case NAND_CMD_PAGEPROG: 1570c76b78d8SArchit Taneja case NAND_CMD_STATUS: 1571c76b78d8SArchit Taneja case NAND_CMD_NONE: 1572c76b78d8SArchit Taneja default: 1573c76b78d8SArchit Taneja break; 1574c76b78d8SArchit Taneja } 1575c76b78d8SArchit Taneja 1576c76b78d8SArchit Taneja if (ret) { 1577c76b78d8SArchit Taneja dev_err(nandc->dev, "failure executing command %d\n", 1578c76b78d8SArchit Taneja command); 1579c76b78d8SArchit Taneja free_descs(nandc); 1580c76b78d8SArchit Taneja return; 1581c76b78d8SArchit Taneja } 1582c76b78d8SArchit Taneja 1583c76b78d8SArchit Taneja if (wait) { 1584c76b78d8SArchit Taneja ret = submit_descs(nandc); 1585c76b78d8SArchit Taneja if (ret) 1586c76b78d8SArchit Taneja dev_err(nandc->dev, 1587c76b78d8SArchit Taneja "failure submitting descs for command %d\n", 1588c76b78d8SArchit Taneja command); 1589c76b78d8SArchit Taneja } 1590c76b78d8SArchit Taneja 1591c76b78d8SArchit Taneja free_descs(nandc); 1592c76b78d8SArchit Taneja 1593c76b78d8SArchit Taneja post_command(host, command); 1594c76b78d8SArchit Taneja } 1595c76b78d8SArchit Taneja 1596c76b78d8SArchit Taneja /* 1597c76b78d8SArchit Taneja * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read 1598c76b78d8SArchit Taneja * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS. 
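 * The BCH case is evaluated per codeword by parse_read_errors() below.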
1599c76b78d8SArchit Taneja  *
1600c76b78d8SArchit Taneja  * when using RS ECC, the HW reports the same errors when reading an erased CW,
1601c76b78d8SArchit Taneja  * but it notifies that it is an erased CW by placing special characters at
1602c76b78d8SArchit Taneja  * certain offsets in the buffer.
1603c76b78d8SArchit Taneja  *
1604c76b78d8SArchit Taneja  * verify if the page is erased or not, and fix up the page for RS ECC by
1605c76b78d8SArchit Taneja  * replacing the special characters with 0xff.
1606c76b78d8SArchit Taneja  */
1607c76b78d8SArchit Taneja static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1608c76b78d8SArchit Taneja {
1609c76b78d8SArchit Taneja 	u8 empty1, empty2;
1610c76b78d8SArchit Taneja 
1611c76b78d8SArchit Taneja 	/*
1612c76b78d8SArchit Taneja 	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
1613c76b78d8SArchit Taneja 	 * is erased by looking for 0x54s at offsets 3 and 175 from the
1614c76b78d8SArchit Taneja 	 * beginning of each codeword
1615c76b78d8SArchit Taneja 	 */
1616c76b78d8SArchit Taneja 
1617c76b78d8SArchit Taneja 	empty1 = data_buf[3];
1618c76b78d8SArchit Taneja 	empty2 = data_buf[175];
1619c76b78d8SArchit Taneja 
1620c76b78d8SArchit Taneja 	/*
1621c76b78d8SArchit Taneja 	 * if the erased codeword markers exist, override them with
1622c76b78d8SArchit Taneja 	 * 0xffs
1623c76b78d8SArchit Taneja 	 */
1624c76b78d8SArchit Taneja 	if ((empty1 == 0x54 && empty2 == 0xff) ||
1625c76b78d8SArchit Taneja 	    (empty1 == 0xff && empty2 == 0x54)) {
1626c76b78d8SArchit Taneja 		data_buf[3] = 0xff;
1627c76b78d8SArchit Taneja 		data_buf[175] = 0xff;
1628c76b78d8SArchit Taneja 	}
1629c76b78d8SArchit Taneja 
1630c76b78d8SArchit Taneja 	/*
1631c76b78d8SArchit Taneja 	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
1632c76b78d8SArchit Taneja 	 * restore the original values at the special offsets
1633c76b78d8SArchit Taneja 	 */
1634c76b78d8SArchit Taneja 	if (memchr_inv(data_buf, 0xff, data_len)) {
1635c76b78d8SArchit Taneja 		data_buf[3] = empty1;
1636c76b78d8SArchit Taneja 		data_buf[175] = empty2;
1637c76b78d8SArchit Taneja 
1638c76b78d8SArchit Taneja 		return false;
1639c76b78d8SArchit Taneja 	}
1640c76b78d8SArchit Taneja 
1641c76b78d8SArchit Taneja 	return true;
1642c76b78d8SArchit Taneja }
1643c76b78d8SArchit Taneja 
1644c76b78d8SArchit Taneja struct read_stats {
1645c76b78d8SArchit Taneja 	__le32 flash;
1646c76b78d8SArchit Taneja 	__le32 buffer;
1647c76b78d8SArchit Taneja 	__le32 erased_cw;
1648c76b78d8SArchit Taneja };
1649c76b78d8SArchit Taneja 
16505bc36b2bSAbhishek Sahu /* reads back FLASH_STATUS register set by the controller */
16515bc36b2bSAbhishek Sahu static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
16525bc36b2bSAbhishek Sahu {
16535bc36b2bSAbhishek Sahu 	struct nand_chip *chip = &host->chip;
16545bc36b2bSAbhishek Sahu 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
16555bc36b2bSAbhishek Sahu 	int i;
16565bc36b2bSAbhishek Sahu 
1657bc368602SPraveenkumar I 	nandc_read_buffer_sync(nandc, true);
1658bc368602SPraveenkumar I 
16595bc36b2bSAbhishek Sahu 	for (i = 0; i < cw_cnt; i++) {
16605bc36b2bSAbhishek Sahu 		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
16615bc36b2bSAbhishek Sahu 
16625bc36b2bSAbhishek Sahu 		if (flash & (FS_OP_ERR | FS_MPU_ERR))
16635bc36b2bSAbhishek Sahu 			return -EIO;
16645bc36b2bSAbhishek Sahu 	}
16655bc36b2bSAbhishek Sahu 
16665bc36b2bSAbhishek Sahu 	return 0;
16675bc36b2bSAbhishek Sahu }
16685bc36b2bSAbhishek Sahu 
166985632c17SAbhishek Sahu /* performs raw read for one codeword */
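/*
 * The codeword is copied out of the controller buffer in four chunks:
 * data_size1 bytes of data, oob_size1 (BBM) bytes, data_size2 bytes of data,
 * then oob_size2 bytes of spare/ECC. On BAM, the four NAND_READ_LOCATION_*
 * registers are programmed to match this split before the read is started.
 */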
167085632c17SAbhishek Sahu static int 167185632c17SAbhishek Sahu qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, 167285632c17SAbhishek Sahu u8 *data_buf, u8 *oob_buf, int page, int cw) 167385632c17SAbhishek Sahu { 167485632c17SAbhishek Sahu struct qcom_nand_host *host = to_qcom_nand_host(chip); 167585632c17SAbhishek Sahu struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 167685632c17SAbhishek Sahu struct nand_ecc_ctrl *ecc = &chip->ecc; 167785632c17SAbhishek Sahu int data_size1, data_size2, oob_size1, oob_size2; 167885632c17SAbhishek Sahu int ret, reg_off = FLASH_BUF_ACC, read_loc = 0; 167985632c17SAbhishek Sahu 168085632c17SAbhishek Sahu nand_read_page_op(chip, page, 0, NULL, 0); 168185632c17SAbhishek Sahu host->use_ecc = false; 168285632c17SAbhishek Sahu 168385632c17SAbhishek Sahu clear_bam_transaction(nandc); 168485632c17SAbhishek Sahu set_address(host, host->cw_size * cw, page); 1685503ee5aaSMd Sadre Alam update_rw_regs(host, 1, true, cw); 16869a7c39e2SMd Sadre Alam config_nand_page_read(chip); 168785632c17SAbhishek Sahu 168885632c17SAbhishek Sahu data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1); 168985632c17SAbhishek Sahu oob_size1 = host->bbm_size; 169085632c17SAbhishek Sahu 1691b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, cw)) { 169285632c17SAbhishek Sahu data_size2 = ecc->size - data_size1 - 169385632c17SAbhishek Sahu ((ecc->steps - 1) * 4); 169485632c17SAbhishek Sahu oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw + 169585632c17SAbhishek Sahu host->spare_bytes; 169685632c17SAbhishek Sahu } else { 169785632c17SAbhishek Sahu data_size2 = host->cw_data - data_size1; 169885632c17SAbhishek Sahu oob_size2 = host->ecc_bytes_hw + host->spare_bytes; 169985632c17SAbhishek Sahu } 170085632c17SAbhishek Sahu 170185632c17SAbhishek Sahu if (nandc->props->is_bam) { 1702e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0); 170385632c17SAbhishek Sahu read_loc += data_size1; 170485632c17SAbhishek Sahu 1705e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0); 170685632c17SAbhishek Sahu read_loc += oob_size1; 170785632c17SAbhishek Sahu 1708e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0); 170985632c17SAbhishek Sahu read_loc += data_size2; 171085632c17SAbhishek Sahu 1711e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1); 171285632c17SAbhishek Sahu } 171385632c17SAbhishek Sahu 1714503ee5aaSMd Sadre Alam config_nand_cw_read(chip, false, cw); 171585632c17SAbhishek Sahu 171685632c17SAbhishek Sahu read_data_dma(nandc, reg_off, data_buf, data_size1, 0); 171785632c17SAbhishek Sahu reg_off += data_size1; 171885632c17SAbhishek Sahu 171985632c17SAbhishek Sahu read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0); 172085632c17SAbhishek Sahu reg_off += oob_size1; 172185632c17SAbhishek Sahu 172285632c17SAbhishek Sahu read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0); 172385632c17SAbhishek Sahu reg_off += data_size2; 172485632c17SAbhishek Sahu 172585632c17SAbhishek Sahu read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0); 172685632c17SAbhishek Sahu 172785632c17SAbhishek Sahu ret = submit_descs(nandc); 172885632c17SAbhishek Sahu free_descs(nandc); 172985632c17SAbhishek Sahu if (ret) { 173085632c17SAbhishek Sahu dev_err(nandc->dev, "failure to read raw cw %d\n", cw); 173185632c17SAbhishek Sahu return ret; 173285632c17SAbhishek Sahu } 173385632c17SAbhishek Sahu 173485632c17SAbhishek Sahu return 
check_flash_errors(host, 1); 173585632c17SAbhishek Sahu } 173685632c17SAbhishek Sahu 1737c76b78d8SArchit Taneja /* 17389f43deeeSAbhishek Sahu * Bitflips can happen in erased codewords also so this function counts the 17399f43deeeSAbhishek Sahu * number of 0 in each CW for which ECC engine returns the uncorrectable 17409f43deeeSAbhishek Sahu * error. The page will be assumed as erased if this count is less than or 17419f43deeeSAbhishek Sahu * equal to the ecc->strength for each CW. 17429f43deeeSAbhishek Sahu * 17439f43deeeSAbhishek Sahu * 1. Both DATA and OOB need to be checked for number of 0. The 17449f43deeeSAbhishek Sahu * top-level API can be called with only data buf or OOB buf so use 17459f43deeeSAbhishek Sahu * chip->data_buf if data buf is null and chip->oob_poi if oob buf 17469f43deeeSAbhishek Sahu * is null for copying the raw bytes. 17479f43deeeSAbhishek Sahu * 2. Perform raw read for all the CW which has uncorrectable errors. 17489f43deeeSAbhishek Sahu * 3. For each CW, check the number of 0 in cw_data and usable OOB bytes. 17499f43deeeSAbhishek Sahu * The BBM and spare bytes bit flip won’t affect the ECC so don’t check 17509f43deeeSAbhishek Sahu * the number of bitflips in this area. 17519f43deeeSAbhishek Sahu */ 17529f43deeeSAbhishek Sahu static int 17539f43deeeSAbhishek Sahu check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf, 17549f43deeeSAbhishek Sahu u8 *oob_buf, unsigned long uncorrectable_cws, 17559f43deeeSAbhishek Sahu int page, unsigned int max_bitflips) 17569f43deeeSAbhishek Sahu { 17579f43deeeSAbhishek Sahu struct nand_chip *chip = &host->chip; 17589f43deeeSAbhishek Sahu struct mtd_info *mtd = nand_to_mtd(chip); 17599f43deeeSAbhishek Sahu struct nand_ecc_ctrl *ecc = &chip->ecc; 17609f43deeeSAbhishek Sahu u8 *cw_data_buf, *cw_oob_buf; 17619f43deeeSAbhishek Sahu int cw, data_size, oob_size, ret = 0; 17629f43deeeSAbhishek Sahu 1763eeab7174SBoris Brezillon if (!data_buf) 1764eeab7174SBoris Brezillon data_buf = nand_get_data_buf(chip); 17659f43deeeSAbhishek Sahu 17669f43deeeSAbhishek Sahu if (!oob_buf) { 1767eeab7174SBoris Brezillon nand_get_data_buf(chip); 17689f43deeeSAbhishek Sahu oob_buf = chip->oob_poi; 17699f43deeeSAbhishek Sahu } 17709f43deeeSAbhishek Sahu 17719f43deeeSAbhishek Sahu for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) { 1772b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, cw)) { 17739f43deeeSAbhishek Sahu data_size = ecc->size - ((ecc->steps - 1) * 4); 17749f43deeeSAbhishek Sahu oob_size = (ecc->steps * 4) + host->ecc_bytes_hw; 17759f43deeeSAbhishek Sahu } else { 17769f43deeeSAbhishek Sahu data_size = host->cw_data; 17779f43deeeSAbhishek Sahu oob_size = host->ecc_bytes_hw; 17789f43deeeSAbhishek Sahu } 17799f43deeeSAbhishek Sahu 17809f43deeeSAbhishek Sahu /* determine starting buffer address for current CW */ 17819f43deeeSAbhishek Sahu cw_data_buf = data_buf + (cw * host->cw_data); 17829f43deeeSAbhishek Sahu cw_oob_buf = oob_buf + (cw * ecc->bytes); 17839f43deeeSAbhishek Sahu 17849f43deeeSAbhishek Sahu ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf, 17859f43deeeSAbhishek Sahu cw_oob_buf, page, cw); 17869f43deeeSAbhishek Sahu if (ret) 17879f43deeeSAbhishek Sahu return ret; 17889f43deeeSAbhishek Sahu 17899f43deeeSAbhishek Sahu /* 17909f43deeeSAbhishek Sahu * make sure it isn't an erased page reported 17919f43deeeSAbhishek Sahu * as not-erased by HW because of a few bitflips 17929f43deeeSAbhishek Sahu */ 17939f43deeeSAbhishek Sahu ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size, 17949f43deeeSAbhishek Sahu cw_oob_buf + 
host->bbm_size, 17959f43deeeSAbhishek Sahu oob_size, NULL, 17969f43deeeSAbhishek Sahu 0, ecc->strength); 17979f43deeeSAbhishek Sahu if (ret < 0) { 17989f43deeeSAbhishek Sahu mtd->ecc_stats.failed++; 17999f43deeeSAbhishek Sahu } else { 18009f43deeeSAbhishek Sahu mtd->ecc_stats.corrected += ret; 18019f43deeeSAbhishek Sahu max_bitflips = max_t(unsigned int, max_bitflips, ret); 18029f43deeeSAbhishek Sahu } 18039f43deeeSAbhishek Sahu } 18049f43deeeSAbhishek Sahu 18059f43deeeSAbhishek Sahu return max_bitflips; 18069f43deeeSAbhishek Sahu } 18079f43deeeSAbhishek Sahu 18089f43deeeSAbhishek Sahu /* 1809c76b78d8SArchit Taneja * reads back status registers set by the controller to notify page read 1810c76b78d8SArchit Taneja * errors. this is equivalent to what 'ecc->correct()' would do. 1811c76b78d8SArchit Taneja */ 1812c76b78d8SArchit Taneja static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf, 18139f43deeeSAbhishek Sahu u8 *oob_buf, int page) 1814c76b78d8SArchit Taneja { 1815c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1816c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1817c76b78d8SArchit Taneja struct mtd_info *mtd = nand_to_mtd(chip); 1818c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 18199f43deeeSAbhishek Sahu unsigned int max_bitflips = 0, uncorrectable_cws = 0; 1820c76b78d8SArchit Taneja struct read_stats *buf; 18219f43deeeSAbhishek Sahu bool flash_op_err = false, erased; 1822c76b78d8SArchit Taneja int i; 18239f43deeeSAbhishek Sahu u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf; 1824c76b78d8SArchit Taneja 1825c76b78d8SArchit Taneja buf = (struct read_stats *)nandc->reg_read_buf; 18266192ff7aSAbhishek Sahu nandc_read_buffer_sync(nandc, true); 1827c76b78d8SArchit Taneja 1828c76b78d8SArchit Taneja for (i = 0; i < ecc->steps; i++, buf++) { 1829c76b78d8SArchit Taneja u32 flash, buffer, erased_cw; 1830c76b78d8SArchit Taneja int data_len, oob_len; 1831c76b78d8SArchit Taneja 1832b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, i)) { 1833c76b78d8SArchit Taneja data_len = ecc->size - ((ecc->steps - 1) << 2); 1834c76b78d8SArchit Taneja oob_len = ecc->steps << 2; 1835c76b78d8SArchit Taneja } else { 1836c76b78d8SArchit Taneja data_len = host->cw_data; 1837c76b78d8SArchit Taneja oob_len = 0; 1838c76b78d8SArchit Taneja } 1839c76b78d8SArchit Taneja 1840c76b78d8SArchit Taneja flash = le32_to_cpu(buf->flash); 1841c76b78d8SArchit Taneja buffer = le32_to_cpu(buf->buffer); 1842c76b78d8SArchit Taneja erased_cw = le32_to_cpu(buf->erased_cw); 1843c76b78d8SArchit Taneja 18448eab7214SAbhishek Sahu /* 18458eab7214SAbhishek Sahu * Check ECC failure for each codeword. ECC failure can 18468eab7214SAbhishek Sahu * happen in either of the following conditions 18478eab7214SAbhishek Sahu * 1. If number of bitflips are greater than ECC engine 18488eab7214SAbhishek Sahu * capability. 18498eab7214SAbhishek Sahu * 2. If this codeword contains all 0xff for which erased 18508eab7214SAbhishek Sahu * codeword detection check will be done. 18518eab7214SAbhishek Sahu */ 18528eab7214SAbhishek Sahu if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) { 18532f610386SAbhishek Sahu /* 18542f610386SAbhishek Sahu * For BCH ECC, ignore erased codeword errors, if 18552f610386SAbhishek Sahu * ERASED_CW bits are set. 
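 * The codeword is treated as erased only when all ERASED_CW status bits
 * are set in the erased-CW detect status word.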
18562f610386SAbhishek Sahu */ 1857c76b78d8SArchit Taneja if (host->bch_enabled) { 1858902f332eSZhen Lei erased = (erased_cw & ERASED_CW) == ERASED_CW; 18592f610386SAbhishek Sahu /* 18602f610386SAbhishek Sahu * For RS ECC, HW reports the erased CW by placing 18612f610386SAbhishek Sahu * special characters at certain offsets in the buffer. 18622f610386SAbhishek Sahu * These special characters will be valid only if 18632f610386SAbhishek Sahu * complete page is read i.e. data_buf is not NULL. 18642f610386SAbhishek Sahu */ 18652f610386SAbhishek Sahu } else if (data_buf) { 1866c76b78d8SArchit Taneja erased = erased_chunk_check_and_fixup(data_buf, 1867c76b78d8SArchit Taneja data_len); 18682f610386SAbhishek Sahu } else { 18692f610386SAbhishek Sahu erased = false; 1870c76b78d8SArchit Taneja } 1871c76b78d8SArchit Taneja 18729f43deeeSAbhishek Sahu if (!erased) 18739f43deeeSAbhishek Sahu uncorrectable_cws |= BIT(i); 18748eab7214SAbhishek Sahu /* 18758eab7214SAbhishek Sahu * Check if MPU or any other operational error (timeout, 18768eab7214SAbhishek Sahu * device failure, etc.) happened for this codeword and 18778eab7214SAbhishek Sahu * make flash_op_err true. If flash_op_err is set, then 18788eab7214SAbhishek Sahu * EIO will be returned for page read. 18798eab7214SAbhishek Sahu */ 18808eab7214SAbhishek Sahu } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) { 18818eab7214SAbhishek Sahu flash_op_err = true; 18828eab7214SAbhishek Sahu /* 18838eab7214SAbhishek Sahu * No ECC or operational errors happened. Check the number of 18848eab7214SAbhishek Sahu * bits corrected and update the ecc_stats.corrected. 18858eab7214SAbhishek Sahu */ 1886c76b78d8SArchit Taneja } else { 1887c76b78d8SArchit Taneja unsigned int stat; 1888c76b78d8SArchit Taneja 1889c76b78d8SArchit Taneja stat = buffer & BS_CORRECTABLE_ERR_MSK; 1890c76b78d8SArchit Taneja mtd->ecc_stats.corrected += stat; 1891c76b78d8SArchit Taneja max_bitflips = max(max_bitflips, stat); 1892c76b78d8SArchit Taneja } 1893c76b78d8SArchit Taneja 18942f610386SAbhishek Sahu if (data_buf) 1895c76b78d8SArchit Taneja data_buf += data_len; 1896c76b78d8SArchit Taneja if (oob_buf) 1897c76b78d8SArchit Taneja oob_buf += oob_len + ecc->bytes; 1898c76b78d8SArchit Taneja } 1899c76b78d8SArchit Taneja 19008eab7214SAbhishek Sahu if (flash_op_err) 19018eab7214SAbhishek Sahu return -EIO; 19028eab7214SAbhishek Sahu 19039f43deeeSAbhishek Sahu if (!uncorrectable_cws) 1904c76b78d8SArchit Taneja return max_bitflips; 19059f43deeeSAbhishek Sahu 19069f43deeeSAbhishek Sahu return check_for_erased_page(host, data_buf_start, oob_buf_start, 19079f43deeeSAbhishek Sahu uncorrectable_cws, page, 19089f43deeeSAbhishek Sahu max_bitflips); 1909c76b78d8SArchit Taneja } 1910c76b78d8SArchit Taneja 1911c76b78d8SArchit Taneja /* 1912c76b78d8SArchit Taneja * helper to perform the actual page read operation, used by ecc->read_page(), 1913c76b78d8SArchit Taneja * ecc->read_oob() 1914c76b78d8SArchit Taneja */ 1915c76b78d8SArchit Taneja static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, 19169f43deeeSAbhishek Sahu u8 *oob_buf, int page) 1917c76b78d8SArchit Taneja { 1918c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1919c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 1920c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 1921add0cfa3SAbhishek Sahu u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf; 1922c76b78d8SArchit Taneja int i, ret; 1923c76b78d8SArchit Taneja 19249a7c39e2SMd Sadre Alam config_nand_page_read(chip); 
1925bde4330aSAbhishek Sahu 1926c76b78d8SArchit Taneja /* queue cmd descs for each codeword */ 1927c76b78d8SArchit Taneja for (i = 0; i < ecc->steps; i++) { 1928c76b78d8SArchit Taneja int data_size, oob_size; 1929c76b78d8SArchit Taneja 1930b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, i)) { 1931c76b78d8SArchit Taneja data_size = ecc->size - ((ecc->steps - 1) << 2); 1932c76b78d8SArchit Taneja oob_size = (ecc->steps << 2) + host->ecc_bytes_hw + 1933c76b78d8SArchit Taneja host->spare_bytes; 1934c76b78d8SArchit Taneja } else { 1935c76b78d8SArchit Taneja data_size = host->cw_data; 1936c76b78d8SArchit Taneja oob_size = host->ecc_bytes_hw + host->spare_bytes; 1937c76b78d8SArchit Taneja } 1938c76b78d8SArchit Taneja 193991af95c1SAbhishek Sahu if (nandc->props->is_bam) { 194091af95c1SAbhishek Sahu if (data_buf && oob_buf) { 1941e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, i, 0, 0, data_size, 0); 1942e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, i, 1, data_size, 194391af95c1SAbhishek Sahu oob_size, 1); 194491af95c1SAbhishek Sahu } else if (data_buf) { 1945e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, i, 0, 0, data_size, 1); 194691af95c1SAbhishek Sahu } else { 1947e7a307f2SMd Sadre Alam nandc_set_read_loc(chip, i, 0, data_size, 194891af95c1SAbhishek Sahu oob_size, 1); 194991af95c1SAbhishek Sahu } 195091af95c1SAbhishek Sahu } 195191af95c1SAbhishek Sahu 1952503ee5aaSMd Sadre Alam config_nand_cw_read(chip, true, i); 1953c76b78d8SArchit Taneja 1954c76b78d8SArchit Taneja if (data_buf) 1955c76b78d8SArchit Taneja read_data_dma(nandc, FLASH_BUF_ACC, data_buf, 195667e830aeSAbhishek Sahu data_size, 0); 1957c76b78d8SArchit Taneja 1958c76b78d8SArchit Taneja /* 1959c76b78d8SArchit Taneja * when ecc is enabled, the controller doesn't read the real 1960c76b78d8SArchit Taneja * or dummy bad block markers in each chunk. 
To maintain a 1961c76b78d8SArchit Taneja * consistent layout across RAW and ECC reads, we just 1962c76b78d8SArchit Taneja * leave the real/dummy BBM offsets empty (i.e, filled with 1963c76b78d8SArchit Taneja * 0xffs) 1964c76b78d8SArchit Taneja */ 1965c76b78d8SArchit Taneja if (oob_buf) { 1966c76b78d8SArchit Taneja int j; 1967c76b78d8SArchit Taneja 1968c76b78d8SArchit Taneja for (j = 0; j < host->bbm_size; j++) 1969c76b78d8SArchit Taneja *oob_buf++ = 0xff; 1970c76b78d8SArchit Taneja 1971c76b78d8SArchit Taneja read_data_dma(nandc, FLASH_BUF_ACC + data_size, 197267e830aeSAbhishek Sahu oob_buf, oob_size, 0); 1973c76b78d8SArchit Taneja } 1974c76b78d8SArchit Taneja 1975c76b78d8SArchit Taneja if (data_buf) 1976c76b78d8SArchit Taneja data_buf += data_size; 1977c76b78d8SArchit Taneja if (oob_buf) 1978c76b78d8SArchit Taneja oob_buf += oob_size; 1979c76b78d8SArchit Taneja } 1980c76b78d8SArchit Taneja 1981c76b78d8SArchit Taneja ret = submit_descs(nandc); 1982c76b78d8SArchit Taneja free_descs(nandc); 1983c76b78d8SArchit Taneja 1984add0cfa3SAbhishek Sahu if (ret) { 1985add0cfa3SAbhishek Sahu dev_err(nandc->dev, "failure to read page/oob\n"); 1986c76b78d8SArchit Taneja return ret; 1987c76b78d8SArchit Taneja } 1988c76b78d8SArchit Taneja 19899f43deeeSAbhishek Sahu return parse_read_errors(host, data_buf_start, oob_buf_start, page); 1990add0cfa3SAbhishek Sahu } 1991add0cfa3SAbhishek Sahu 1992c76b78d8SArchit Taneja /* 1993c76b78d8SArchit Taneja * a helper that copies the last step/codeword of a page (containing free oob) 1994c76b78d8SArchit Taneja * into our local buffer 1995c76b78d8SArchit Taneja */ 1996c76b78d8SArchit Taneja static int copy_last_cw(struct qcom_nand_host *host, int page) 1997c76b78d8SArchit Taneja { 1998c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 1999c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2000c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2001c76b78d8SArchit Taneja int size; 2002c76b78d8SArchit Taneja int ret; 2003c76b78d8SArchit Taneja 2004c76b78d8SArchit Taneja clear_read_regs(nandc); 2005c76b78d8SArchit Taneja 2006c76b78d8SArchit Taneja size = host->use_ecc ? 
host->cw_data : host->cw_size; 2007c76b78d8SArchit Taneja 2008c76b78d8SArchit Taneja /* prepare a clean read buffer */ 2009c76b78d8SArchit Taneja memset(nandc->data_buffer, 0xff, size); 2010c76b78d8SArchit Taneja 2011c76b78d8SArchit Taneja set_address(host, host->cw_size * (ecc->steps - 1), page); 2012503ee5aaSMd Sadre Alam update_rw_regs(host, 1, true, ecc->steps - 1); 2013c76b78d8SArchit Taneja 2014503ee5aaSMd Sadre Alam config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1); 2015c76b78d8SArchit Taneja 201667e830aeSAbhishek Sahu read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0); 2017c76b78d8SArchit Taneja 2018c76b78d8SArchit Taneja ret = submit_descs(nandc); 2019c76b78d8SArchit Taneja if (ret) 2020c76b78d8SArchit Taneja dev_err(nandc->dev, "failed to copy last codeword\n"); 2021c76b78d8SArchit Taneja 2022c76b78d8SArchit Taneja free_descs(nandc); 2023c76b78d8SArchit Taneja 2024c76b78d8SArchit Taneja return ret; 2025c76b78d8SArchit Taneja } 2026c76b78d8SArchit Taneja 2027c76b78d8SArchit Taneja /* implements ecc->read_page() */ 2028b9761687SBoris Brezillon static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf, 2029b9761687SBoris Brezillon int oob_required, int page) 2030c76b78d8SArchit Taneja { 2031c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2032c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2033c76b78d8SArchit Taneja u8 *data_buf, *oob_buf = NULL; 2034c76b78d8SArchit Taneja 203525f815f6SBoris Brezillon nand_read_page_op(chip, page, 0, NULL, 0); 2036c76b78d8SArchit Taneja data_buf = buf; 2037c76b78d8SArchit Taneja oob_buf = oob_required ? chip->oob_poi : NULL; 2038c76b78d8SArchit Taneja 20394e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2040c76b78d8SArchit Taneja 20419f43deeeSAbhishek Sahu return read_page_ecc(host, data_buf, oob_buf, page); 2042c76b78d8SArchit Taneja } 2043c76b78d8SArchit Taneja 2044c76b78d8SArchit Taneja /* implements ecc->read_page_raw() */ 2045b9761687SBoris Brezillon static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf, 2046c76b78d8SArchit Taneja int oob_required, int page) 2047c76b78d8SArchit Taneja { 2048b9761687SBoris Brezillon struct mtd_info *mtd = nand_to_mtd(chip); 2049c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2050c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 205185632c17SAbhishek Sahu int cw, ret; 205285632c17SAbhishek Sahu u8 *data_buf = buf, *oob_buf = chip->oob_poi; 2053c76b78d8SArchit Taneja 205485632c17SAbhishek Sahu for (cw = 0; cw < ecc->steps; cw++) { 205585632c17SAbhishek Sahu ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf, 205685632c17SAbhishek Sahu page, cw); 205785632c17SAbhishek Sahu if (ret) 2058783b5bf9SAbhishek Sahu return ret; 205985632c17SAbhishek Sahu 206085632c17SAbhishek Sahu data_buf += host->cw_data; 206185632c17SAbhishek Sahu oob_buf += ecc->bytes; 2062c76b78d8SArchit Taneja } 2063c76b78d8SArchit Taneja 206485632c17SAbhishek Sahu return 0; 20655bc36b2bSAbhishek Sahu } 20665bc36b2bSAbhishek Sahu 2067c76b78d8SArchit Taneja /* implements ecc->read_oob() */ 2068b9761687SBoris Brezillon static int qcom_nandc_read_oob(struct nand_chip *chip, int page) 2069c76b78d8SArchit Taneja { 2070c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2071c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2072c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 
2073c76b78d8SArchit Taneja 2074c76b78d8SArchit Taneja clear_read_regs(nandc); 20754e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2076c76b78d8SArchit Taneja 2077c76b78d8SArchit Taneja host->use_ecc = true; 2078c76b78d8SArchit Taneja set_address(host, 0, page); 2079503ee5aaSMd Sadre Alam update_rw_regs(host, ecc->steps, true, 0); 2080c76b78d8SArchit Taneja 20819f43deeeSAbhishek Sahu return read_page_ecc(host, NULL, chip->oob_poi, page); 2082c76b78d8SArchit Taneja } 2083c76b78d8SArchit Taneja 2084c76b78d8SArchit Taneja /* implements ecc->write_page() */ 2085767eb6fbSBoris Brezillon static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf, 2086767eb6fbSBoris Brezillon int oob_required, int page) 2087c76b78d8SArchit Taneja { 2088c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2089c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2090c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2091c76b78d8SArchit Taneja u8 *data_buf, *oob_buf; 2092c76b78d8SArchit Taneja int i, ret; 2093c76b78d8SArchit Taneja 209425f815f6SBoris Brezillon nand_prog_page_begin_op(chip, page, 0, NULL, 0); 209525f815f6SBoris Brezillon 2096c76b78d8SArchit Taneja clear_read_regs(nandc); 20974e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2098c76b78d8SArchit Taneja 2099c76b78d8SArchit Taneja data_buf = (u8 *)buf; 2100c76b78d8SArchit Taneja oob_buf = chip->oob_poi; 2101c76b78d8SArchit Taneja 2102c76b78d8SArchit Taneja host->use_ecc = true; 2103503ee5aaSMd Sadre Alam update_rw_regs(host, ecc->steps, false, 0); 21049a7c39e2SMd Sadre Alam config_nand_page_write(chip); 2105c76b78d8SArchit Taneja 2106c76b78d8SArchit Taneja for (i = 0; i < ecc->steps; i++) { 2107c76b78d8SArchit Taneja int data_size, oob_size; 2108c76b78d8SArchit Taneja 2109b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, i)) { 2110c76b78d8SArchit Taneja data_size = ecc->size - ((ecc->steps - 1) << 2); 2111c76b78d8SArchit Taneja oob_size = (ecc->steps << 2) + host->ecc_bytes_hw + 2112c76b78d8SArchit Taneja host->spare_bytes; 2113c76b78d8SArchit Taneja } else { 2114c76b78d8SArchit Taneja data_size = host->cw_data; 2115c76b78d8SArchit Taneja oob_size = ecc->bytes; 2116c76b78d8SArchit Taneja } 2117c76b78d8SArchit Taneja 2118c76b78d8SArchit Taneja 211967e830aeSAbhishek Sahu write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size, 212067e830aeSAbhishek Sahu i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0); 2121c76b78d8SArchit Taneja 2122c76b78d8SArchit Taneja /* 2123c76b78d8SArchit Taneja * when ECC is enabled, we don't really need to write anything 2124c76b78d8SArchit Taneja * to oob for the first n - 1 codewords since these oob regions 2125c76b78d8SArchit Taneja * just contain ECC bytes that's written by the controller 2126c76b78d8SArchit Taneja * itself. For the last codeword, we skip the bbm positions and 2127c76b78d8SArchit Taneja * write to the free oob area. 
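 * (oob_buf is advanced past host->bbm_size below, so the BBM bytes are never
 * written from this path; the read side likewise leaves them as 0xff.)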
2128c76b78d8SArchit Taneja */ 2129b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, i)) { 2130c76b78d8SArchit Taneja oob_buf += host->bbm_size; 2131c76b78d8SArchit Taneja 2132c76b78d8SArchit Taneja write_data_dma(nandc, FLASH_BUF_ACC + data_size, 213367e830aeSAbhishek Sahu oob_buf, oob_size, 0); 2134c76b78d8SArchit Taneja } 2135c76b78d8SArchit Taneja 21369a7c39e2SMd Sadre Alam config_nand_cw_write(chip); 2137c76b78d8SArchit Taneja 2138c76b78d8SArchit Taneja data_buf += data_size; 2139c76b78d8SArchit Taneja oob_buf += oob_size; 2140c76b78d8SArchit Taneja } 2141c76b78d8SArchit Taneja 2142c76b78d8SArchit Taneja ret = submit_descs(nandc); 2143c76b78d8SArchit Taneja if (ret) 2144c76b78d8SArchit Taneja dev_err(nandc->dev, "failure to write page\n"); 2145c76b78d8SArchit Taneja 2146c76b78d8SArchit Taneja free_descs(nandc); 2147c76b78d8SArchit Taneja 214825f815f6SBoris Brezillon if (!ret) 214925f815f6SBoris Brezillon ret = nand_prog_page_end_op(chip); 215025f815f6SBoris Brezillon 2151c76b78d8SArchit Taneja return ret; 2152c76b78d8SArchit Taneja } 2153c76b78d8SArchit Taneja 2154c76b78d8SArchit Taneja /* implements ecc->write_page_raw() */ 2155767eb6fbSBoris Brezillon static int qcom_nandc_write_page_raw(struct nand_chip *chip, 2156767eb6fbSBoris Brezillon const uint8_t *buf, int oob_required, 2157767eb6fbSBoris Brezillon int page) 2158c76b78d8SArchit Taneja { 2159767eb6fbSBoris Brezillon struct mtd_info *mtd = nand_to_mtd(chip); 2160c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2161c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2162c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2163c76b78d8SArchit Taneja u8 *data_buf, *oob_buf; 2164c76b78d8SArchit Taneja int i, ret; 2165c76b78d8SArchit Taneja 216625f815f6SBoris Brezillon nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2167c76b78d8SArchit Taneja clear_read_regs(nandc); 21684e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2169c76b78d8SArchit Taneja 2170c76b78d8SArchit Taneja data_buf = (u8 *)buf; 2171c76b78d8SArchit Taneja oob_buf = chip->oob_poi; 2172c76b78d8SArchit Taneja 2173c76b78d8SArchit Taneja host->use_ecc = false; 2174503ee5aaSMd Sadre Alam update_rw_regs(host, ecc->steps, false, 0); 21759a7c39e2SMd Sadre Alam config_nand_page_write(chip); 2176c76b78d8SArchit Taneja 2177c76b78d8SArchit Taneja for (i = 0; i < ecc->steps; i++) { 2178c76b78d8SArchit Taneja int data_size1, data_size2, oob_size1, oob_size2; 2179c76b78d8SArchit Taneja int reg_off = FLASH_BUF_ACC; 2180c76b78d8SArchit Taneja 2181c76b78d8SArchit Taneja data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1); 2182c76b78d8SArchit Taneja oob_size1 = host->bbm_size; 2183c76b78d8SArchit Taneja 2184b057e498SMd Sadre Alam if (qcom_nandc_is_last_cw(ecc, i)) { 2185c76b78d8SArchit Taneja data_size2 = ecc->size - data_size1 - 2186c76b78d8SArchit Taneja ((ecc->steps - 1) << 2); 2187c76b78d8SArchit Taneja oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw + 2188c76b78d8SArchit Taneja host->spare_bytes; 2189c76b78d8SArchit Taneja } else { 2190c76b78d8SArchit Taneja data_size2 = host->cw_data - data_size1; 2191c76b78d8SArchit Taneja oob_size2 = host->ecc_bytes_hw + host->spare_bytes; 2192c76b78d8SArchit Taneja } 2193c76b78d8SArchit Taneja 219467e830aeSAbhishek Sahu write_data_dma(nandc, reg_off, data_buf, data_size1, 219567e830aeSAbhishek Sahu NAND_BAM_NO_EOT); 2196c76b78d8SArchit Taneja reg_off += data_size1; 2197c76b78d8SArchit Taneja data_buf += data_size1; 2198c76b78d8SArchit Taneja 
219967e830aeSAbhishek Sahu write_data_dma(nandc, reg_off, oob_buf, oob_size1, 220067e830aeSAbhishek Sahu NAND_BAM_NO_EOT); 2201c76b78d8SArchit Taneja reg_off += oob_size1; 2202c76b78d8SArchit Taneja oob_buf += oob_size1; 2203c76b78d8SArchit Taneja 220467e830aeSAbhishek Sahu write_data_dma(nandc, reg_off, data_buf, data_size2, 220567e830aeSAbhishek Sahu NAND_BAM_NO_EOT); 2206c76b78d8SArchit Taneja reg_off += data_size2; 2207c76b78d8SArchit Taneja data_buf += data_size2; 2208c76b78d8SArchit Taneja 220967e830aeSAbhishek Sahu write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); 2210c76b78d8SArchit Taneja oob_buf += oob_size2; 2211c76b78d8SArchit Taneja 22129a7c39e2SMd Sadre Alam config_nand_cw_write(chip); 2213c76b78d8SArchit Taneja } 2214c76b78d8SArchit Taneja 2215c76b78d8SArchit Taneja ret = submit_descs(nandc); 2216c76b78d8SArchit Taneja if (ret) 2217c76b78d8SArchit Taneja dev_err(nandc->dev, "failure to write raw page\n"); 2218c76b78d8SArchit Taneja 2219c76b78d8SArchit Taneja free_descs(nandc); 2220c76b78d8SArchit Taneja 222125f815f6SBoris Brezillon if (!ret) 222225f815f6SBoris Brezillon ret = nand_prog_page_end_op(chip); 222325f815f6SBoris Brezillon 2224c76b78d8SArchit Taneja return ret; 2225c76b78d8SArchit Taneja } 2226c76b78d8SArchit Taneja 2227c76b78d8SArchit Taneja /* 2228c76b78d8SArchit Taneja * implements ecc->write_oob() 2229c76b78d8SArchit Taneja * 223028eed9f6SAbhishek Sahu * the NAND controller cannot write only data or only OOB within a codeword 223128eed9f6SAbhishek Sahu * since ECC is calculated for the combined codeword. So update the OOB from 223228eed9f6SAbhishek Sahu * chip->oob_poi, and pad the data area with 0xff before writing. 2233c76b78d8SArchit Taneja */ 2234767eb6fbSBoris Brezillon static int qcom_nandc_write_oob(struct nand_chip *chip, int page) 2235c76b78d8SArchit Taneja { 2236767eb6fbSBoris Brezillon struct mtd_info *mtd = nand_to_mtd(chip); 2237c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2238c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2239c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2240c76b78d8SArchit Taneja u8 *oob = chip->oob_poi; 2241c76b78d8SArchit Taneja int data_size, oob_size; 224297d90da8SBoris Brezillon int ret; 2243c76b78d8SArchit Taneja 2244c76b78d8SArchit Taneja host->use_ecc = true; 22454e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2246c76b78d8SArchit Taneja 2247c76b78d8SArchit Taneja /* calculate the data and oob size for the last codeword/step */ 2248c76b78d8SArchit Taneja data_size = ecc->size - ((ecc->steps - 1) << 2); 2249aa02fcf5SBoris Brezillon oob_size = mtd->oobavail; 2250c76b78d8SArchit Taneja 225128eed9f6SAbhishek Sahu memset(nandc->data_buffer, 0xff, host->cw_data); 2252c76b78d8SArchit Taneja /* override new oob content to last codeword */ 2253aa02fcf5SBoris Brezillon mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob, 2254aa02fcf5SBoris Brezillon 0, mtd->oobavail); 2255c76b78d8SArchit Taneja 2256c76b78d8SArchit Taneja set_address(host, host->cw_size * (ecc->steps - 1), page); 2257503ee5aaSMd Sadre Alam update_rw_regs(host, 1, false, 0); 2258c76b78d8SArchit Taneja 22599a7c39e2SMd Sadre Alam config_nand_page_write(chip); 226067e830aeSAbhishek Sahu write_data_dma(nandc, FLASH_BUF_ACC, 226167e830aeSAbhishek Sahu nandc->data_buffer, data_size + oob_size, 0); 22629a7c39e2SMd Sadre Alam config_nand_cw_write(chip); 2263c76b78d8SArchit Taneja 2264c76b78d8SArchit Taneja ret = submit_descs(nandc); 2265c76b78d8SArchit 
Taneja 2266c76b78d8SArchit Taneja free_descs(nandc); 2267c76b78d8SArchit Taneja 2268c76b78d8SArchit Taneja if (ret) { 2269c76b78d8SArchit Taneja dev_err(nandc->dev, "failure to write oob\n"); 2270c76b78d8SArchit Taneja return -EIO; 2271c76b78d8SArchit Taneja } 2272c76b78d8SArchit Taneja 227397d90da8SBoris Brezillon return nand_prog_page_end_op(chip); 2274c76b78d8SArchit Taneja } 2275c76b78d8SArchit Taneja 2276c17556f5SBoris Brezillon static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs) 2277c76b78d8SArchit Taneja { 2278c17556f5SBoris Brezillon struct mtd_info *mtd = nand_to_mtd(chip); 2279c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2280c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2281c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2282c76b78d8SArchit Taneja int page, ret, bbpos, bad = 0; 2283c76b78d8SArchit Taneja 2284c76b78d8SArchit Taneja page = (int)(ofs >> chip->page_shift) & chip->pagemask; 2285c76b78d8SArchit Taneja 2286c76b78d8SArchit Taneja /* 2287c76b78d8SArchit Taneja * configure registers for a raw sub page read, the address is set to 2288c76b78d8SArchit Taneja * the beginning of the last codeword, we don't care about reading ecc 2289c76b78d8SArchit Taneja * portion of oob. we just want the first few bytes from this codeword 2290c76b78d8SArchit Taneja * that contains the BBM 2291c76b78d8SArchit Taneja */ 2292c76b78d8SArchit Taneja host->use_ecc = false; 2293c76b78d8SArchit Taneja 22944e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2295c76b78d8SArchit Taneja ret = copy_last_cw(host, page); 2296c76b78d8SArchit Taneja if (ret) 2297c76b78d8SArchit Taneja goto err; 2298c76b78d8SArchit Taneja 22995bc36b2bSAbhishek Sahu if (check_flash_errors(host, 1)) { 2300c76b78d8SArchit Taneja dev_warn(nandc->dev, "error when trying to read BBM\n"); 2301c76b78d8SArchit Taneja goto err; 2302c76b78d8SArchit Taneja } 2303c76b78d8SArchit Taneja 2304c76b78d8SArchit Taneja bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1); 2305c76b78d8SArchit Taneja 2306c76b78d8SArchit Taneja bad = nandc->data_buffer[bbpos] != 0xff; 2307c76b78d8SArchit Taneja 2308c76b78d8SArchit Taneja if (chip->options & NAND_BUSWIDTH_16) 2309c76b78d8SArchit Taneja bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff); 2310c76b78d8SArchit Taneja err: 2311c76b78d8SArchit Taneja return bad; 2312c76b78d8SArchit Taneja } 2313c76b78d8SArchit Taneja 2314c17556f5SBoris Brezillon static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs) 2315c76b78d8SArchit Taneja { 2316c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2317c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2318c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 231997d90da8SBoris Brezillon int page, ret; 2320c76b78d8SArchit Taneja 2321c76b78d8SArchit Taneja clear_read_regs(nandc); 23224e2f6c52SAbhishek Sahu clear_bam_transaction(nandc); 2323c76b78d8SArchit Taneja 2324c76b78d8SArchit Taneja /* 2325c76b78d8SArchit Taneja * to mark the BBM as bad, we flash the entire last codeword with 0s. 
2326c76b78d8SArchit Taneja * we don't care about the rest of the content in the codeword since 2327c76b78d8SArchit Taneja * we aren't going to use this block again 2328c76b78d8SArchit Taneja */ 2329c76b78d8SArchit Taneja memset(nandc->data_buffer, 0x00, host->cw_size); 2330c76b78d8SArchit Taneja 2331c76b78d8SArchit Taneja page = (int)(ofs >> chip->page_shift) & chip->pagemask; 2332c76b78d8SArchit Taneja 2333c76b78d8SArchit Taneja /* prepare write */ 2334c76b78d8SArchit Taneja host->use_ecc = false; 2335c76b78d8SArchit Taneja set_address(host, host->cw_size * (ecc->steps - 1), page); 2336503ee5aaSMd Sadre Alam update_rw_regs(host, 1, false, ecc->steps - 1); 2337c76b78d8SArchit Taneja 23389a7c39e2SMd Sadre Alam config_nand_page_write(chip); 233967e830aeSAbhishek Sahu write_data_dma(nandc, FLASH_BUF_ACC, 234067e830aeSAbhishek Sahu nandc->data_buffer, host->cw_size, 0); 23419a7c39e2SMd Sadre Alam config_nand_cw_write(chip); 2342c76b78d8SArchit Taneja 2343c76b78d8SArchit Taneja ret = submit_descs(nandc); 2344c76b78d8SArchit Taneja 2345c76b78d8SArchit Taneja free_descs(nandc); 2346c76b78d8SArchit Taneja 2347c76b78d8SArchit Taneja if (ret) { 2348c76b78d8SArchit Taneja dev_err(nandc->dev, "failure to update BBM\n"); 2349c76b78d8SArchit Taneja return -EIO; 2350c76b78d8SArchit Taneja } 2351c76b78d8SArchit Taneja 235297d90da8SBoris Brezillon return nand_prog_page_end_op(chip); 2353c76b78d8SArchit Taneja } 2354c76b78d8SArchit Taneja 2355c76b78d8SArchit Taneja /* 2356716bbbabSBoris Brezillon * the three functions below implement chip->legacy.read_byte(), 2357716bbbabSBoris Brezillon * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these 2358716bbbabSBoris Brezillon * aren't used for reading/writing page data, they are used for smaller data 2359716bbbabSBoris Brezillon * like reading id, status etc 2360c76b78d8SArchit Taneja */ 23617e534323SBoris Brezillon static uint8_t qcom_nandc_read_byte(struct nand_chip *chip) 2362c76b78d8SArchit Taneja { 2363c76b78d8SArchit Taneja struct qcom_nand_host *host = to_qcom_nand_host(chip); 2364c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2365c76b78d8SArchit Taneja u8 *buf = nandc->data_buffer; 2366c76b78d8SArchit Taneja u8 ret = 0x0; 2367c76b78d8SArchit Taneja 2368c76b78d8SArchit Taneja if (host->last_command == NAND_CMD_STATUS) { 2369c76b78d8SArchit Taneja ret = host->status; 2370c76b78d8SArchit Taneja 2371c76b78d8SArchit Taneja host->status = NAND_STATUS_READY | NAND_STATUS_WP; 2372c76b78d8SArchit Taneja 2373c76b78d8SArchit Taneja return ret; 2374c76b78d8SArchit Taneja } 2375c76b78d8SArchit Taneja 2376c76b78d8SArchit Taneja if (nandc->buf_start < nandc->buf_count) 2377c76b78d8SArchit Taneja ret = buf[nandc->buf_start++]; 2378c76b78d8SArchit Taneja 2379c76b78d8SArchit Taneja return ret; 2380c76b78d8SArchit Taneja } 2381c76b78d8SArchit Taneja 23827e534323SBoris Brezillon static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len) 2383c76b78d8SArchit Taneja { 2384c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2385c76b78d8SArchit Taneja int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start); 2386c76b78d8SArchit Taneja 2387c76b78d8SArchit Taneja memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len); 2388c76b78d8SArchit Taneja nandc->buf_start += real_len; 2389c76b78d8SArchit Taneja } 2390c76b78d8SArchit Taneja 2391c0739d85SBoris Brezillon static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf, 
2392c76b78d8SArchit Taneja int len) 2393c76b78d8SArchit Taneja { 2394c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2395c76b78d8SArchit Taneja int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start); 2396c76b78d8SArchit Taneja 2397c76b78d8SArchit Taneja memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len); 2398c76b78d8SArchit Taneja 2399c76b78d8SArchit Taneja nandc->buf_start += real_len; 2400c76b78d8SArchit Taneja } 2401c76b78d8SArchit Taneja 2402c76b78d8SArchit Taneja /* we support only one external chip for now */ 2403758b56f5SBoris Brezillon static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr) 2404c76b78d8SArchit Taneja { 2405c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 2406c76b78d8SArchit Taneja 2407c76b78d8SArchit Taneja if (chipnr <= 0) 2408c76b78d8SArchit Taneja return; 2409c76b78d8SArchit Taneja 2410c76b78d8SArchit Taneja dev_warn(nandc->dev, "invalid chip select\n"); 2411c76b78d8SArchit Taneja } 2412c76b78d8SArchit Taneja 2413c76b78d8SArchit Taneja /* 2414c76b78d8SArchit Taneja * NAND controller page layout info 2415c76b78d8SArchit Taneja * 2416c76b78d8SArchit Taneja * Layout with ECC enabled: 2417c76b78d8SArchit Taneja * 2418c76b78d8SArchit Taneja * |----------------------| |---------------------------------| 2419c76b78d8SArchit Taneja * | xx.......yy| | *********xx.......yy| 2420c76b78d8SArchit Taneja * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy| 2421c76b78d8SArchit Taneja * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy| 2422c76b78d8SArchit Taneja * | xx.......yy| | *********xx.......yy| 2423c76b78d8SArchit Taneja * |----------------------| |---------------------------------| 2424c76b78d8SArchit Taneja * codeword 1,2..n-1 codeword n 2425c76b78d8SArchit Taneja * <---(528/532 Bytes)--> <-------(528/532 Bytes)---------> 2426c76b78d8SArchit Taneja * 2427c76b78d8SArchit Taneja * n = Number of codewords in the page 2428c76b78d8SArchit Taneja * . = ECC bytes 2429c76b78d8SArchit Taneja * * = Spare/free bytes 2430c76b78d8SArchit Taneja * x = Unused byte(s) 2431c76b78d8SArchit Taneja * y = Reserved byte(s) 2432c76b78d8SArchit Taneja * 2433c76b78d8SArchit Taneja * 2K page: n = 4, spare = 16 bytes 2434c76b78d8SArchit Taneja * 4K page: n = 8, spare = 32 bytes 2435c76b78d8SArchit Taneja * 8K page: n = 16, spare = 64 bytes 2436c76b78d8SArchit Taneja * 2437c76b78d8SArchit Taneja * the qcom nand controller operates at a sub page/codeword level. each 2438c76b78d8SArchit Taneja * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively. 2439c76b78d8SArchit Taneja * the number of ECC bytes vary based on the ECC strength and the bus width. 2440c76b78d8SArchit Taneja * 2441c76b78d8SArchit Taneja * the first n - 1 codewords contains 516 bytes of user data, the remaining 2442c76b78d8SArchit Taneja * 12/16 bytes consist of ECC and reserved data. The nth codeword contains 2443c76b78d8SArchit Taneja * both user data and spare(oobavail) bytes that sum up to 516 bytes. 2444c76b78d8SArchit Taneja * 2445c76b78d8SArchit Taneja * When we access a page with ECC enabled, the reserved bytes(s) are not 2446c76b78d8SArchit Taneja * accessible at all. When reading, we fill up these unreadable positions 2447c76b78d8SArchit Taneja * with 0xffs. When writing, the controller skips writing the inaccessible 2448c76b78d8SArchit Taneja * bytes. 
2449c76b78d8SArchit Taneja * 2450c76b78d8SArchit Taneja * Layout with ECC disabled: 2451c76b78d8SArchit Taneja * 2452c76b78d8SArchit Taneja * |------------------------------| |---------------------------------------| 2453c76b78d8SArchit Taneja * | yy xx.......| | bb *********xx.......| 2454c76b78d8SArchit Taneja * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..| 2455c76b78d8SArchit Taneja * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......| 2456c76b78d8SArchit Taneja * | yy xx.......| | bb *********xx.......| 2457c76b78d8SArchit Taneja * |------------------------------| |---------------------------------------| 2458c76b78d8SArchit Taneja * codeword 1,2..n-1 codeword n 2459c76b78d8SArchit Taneja * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)-----------> 2460c76b78d8SArchit Taneja * 2461c76b78d8SArchit Taneja * n = Number of codewords in the page 2462c76b78d8SArchit Taneja * . = ECC bytes 2463c76b78d8SArchit Taneja * * = Spare/free bytes 2464c76b78d8SArchit Taneja * x = Unused byte(s) 2465c76b78d8SArchit Taneja * y = Dummy Bad Block byte(s) 2466c76b78d8SArchit Taneja * b = Real Bad Block byte(s) 2467c76b78d8SArchit Taneja * size1/size2 = function of codeword size and 'n' 2468c76b78d8SArchit Taneja * 2469c76b78d8SArchit Taneja * when the ECC block is disabled, one reserved byte (or two for 16 bit bus 2470c76b78d8SArchit Taneja * width) is now accessible. For the first n - 1 codewords, these are dummy Bad 2471c76b78d8SArchit Taneja * Block Markers. In the last codeword, this position contains the real BBM 2472c76b78d8SArchit Taneja * 2473c76b78d8SArchit Taneja * In order to have a consistent layout between RAW and ECC modes, we assume 2474c76b78d8SArchit Taneja * the following OOB layout arrangement: 2475c76b78d8SArchit Taneja * 2476c76b78d8SArchit Taneja * |-----------| |--------------------| 2477c76b78d8SArchit Taneja * |yyxx.......| |bb*********xx.......| 2478c76b78d8SArchit Taneja * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..| 2479c76b78d8SArchit Taneja * |yyxx.......| |bb*********xx.......| 2480c76b78d8SArchit Taneja * |yyxx.......| |bb*********xx.......| 2481c76b78d8SArchit Taneja * |-----------| |--------------------| 2482c76b78d8SArchit Taneja * first n - 1 nth OOB region 2483c76b78d8SArchit Taneja * OOB regions 2484c76b78d8SArchit Taneja * 2485c76b78d8SArchit Taneja * n = Number of codewords in the page 2486c76b78d8SArchit Taneja * . = ECC bytes 2487c76b78d8SArchit Taneja * * = FREE OOB bytes 2488c76b78d8SArchit Taneja * y = Dummy bad block byte(s) (inaccessible when ECC enabled) 2489c76b78d8SArchit Taneja * x = Unused byte(s) 2490c76b78d8SArchit Taneja * b = Real bad block byte(s) (inaccessible when ECC enabled) 2491c76b78d8SArchit Taneja * 2492c76b78d8SArchit Taneja * This layout is read as is when ECC is disabled. When ECC is enabled, the 2493c76b78d8SArchit Taneja * inaccessible Bad Block byte(s) are ignored when we write to a page/oob, 2494c76b78d8SArchit Taneja * and assumed as 0xffs when we read a page/oob. The ECC, unused and 2495421e81c4SBoris Brezillon * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is 2496421e81c4SBoris Brezillon * the sum of the three).
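 *
 * As a worked example (using the 4 bit BCH, 8 bit bus values chosen in
 * qcom_nand_attach_chip() below): ecc_bytes_hw = 7, spare_bytes = 4 and
 * bbm_size = 1, so ecc->bytes = 12. For a 2K page (n = 4, 64 byte OOB),
 * the first ECC OOB region is 3 * 12 + 1 = 37 bytes, the FREE OOB region
 * is 4 * 4 = 16 bytes and the last ECC OOB region is 7 + 4 = 11 bytes,
 * which together account for all 64 OOB bytes.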
2497c76b78d8SArchit Taneja */ 2498421e81c4SBoris Brezillon static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section, 2499421e81c4SBoris Brezillon struct mtd_oob_region *oobregion) 2500c76b78d8SArchit Taneja { 2501421e81c4SBoris Brezillon struct nand_chip *chip = mtd_to_nand(mtd); 2502421e81c4SBoris Brezillon struct qcom_nand_host *host = to_qcom_nand_host(chip); 2503c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2504c76b78d8SArchit Taneja 2505421e81c4SBoris Brezillon if (section > 1) 2506421e81c4SBoris Brezillon return -ERANGE; 2507c76b78d8SArchit Taneja 2508421e81c4SBoris Brezillon if (!section) { 2509421e81c4SBoris Brezillon oobregion->length = (ecc->bytes * (ecc->steps - 1)) + 2510421e81c4SBoris Brezillon host->bbm_size; 2511421e81c4SBoris Brezillon oobregion->offset = 0; 2512421e81c4SBoris Brezillon } else { 2513421e81c4SBoris Brezillon oobregion->length = host->ecc_bytes_hw + host->spare_bytes; 2514421e81c4SBoris Brezillon oobregion->offset = mtd->oobsize - oobregion->length; 2515c76b78d8SArchit Taneja } 2516c76b78d8SArchit Taneja 2517421e81c4SBoris Brezillon return 0; 2518c76b78d8SArchit Taneja } 2519c76b78d8SArchit Taneja 2520421e81c4SBoris Brezillon static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section, 2521421e81c4SBoris Brezillon struct mtd_oob_region *oobregion) 2522421e81c4SBoris Brezillon { 2523421e81c4SBoris Brezillon struct nand_chip *chip = mtd_to_nand(mtd); 2524421e81c4SBoris Brezillon struct qcom_nand_host *host = to_qcom_nand_host(chip); 2525421e81c4SBoris Brezillon struct nand_ecc_ctrl *ecc = &chip->ecc; 2526421e81c4SBoris Brezillon 2527421e81c4SBoris Brezillon if (section) 2528421e81c4SBoris Brezillon return -ERANGE; 2529421e81c4SBoris Brezillon 2530421e81c4SBoris Brezillon oobregion->length = ecc->steps * 4; 2531421e81c4SBoris Brezillon oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size; 2532421e81c4SBoris Brezillon 2533421e81c4SBoris Brezillon return 0; 2534421e81c4SBoris Brezillon } 2535421e81c4SBoris Brezillon 2536421e81c4SBoris Brezillon static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = { 2537421e81c4SBoris Brezillon .ecc = qcom_nand_ooblayout_ecc, 2538421e81c4SBoris Brezillon .free = qcom_nand_ooblayout_free, 2539421e81c4SBoris Brezillon }; 2540421e81c4SBoris Brezillon 25417ddb937fSAbhishek Sahu static int 25427ddb937fSAbhishek Sahu qcom_nandc_calc_ecc_bytes(int step_size, int strength) 25437ddb937fSAbhishek Sahu { 25447ddb937fSAbhishek Sahu return strength == 4 ? 12 : 16; 25457ddb937fSAbhishek Sahu } 25467ddb937fSAbhishek Sahu NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes, 25477ddb937fSAbhishek Sahu NANDC_STEP_SIZE, 4, 8); 25487ddb937fSAbhishek Sahu 25496a3cec64SMiquel Raynal static int qcom_nand_attach_chip(struct nand_chip *chip) 2550c76b78d8SArchit Taneja { 2551c76b78d8SArchit Taneja struct mtd_info *mtd = nand_to_mtd(chip); 25526a3cec64SMiquel Raynal struct qcom_nand_host *host = to_qcom_nand_host(chip); 2553c76b78d8SArchit Taneja struct nand_ecc_ctrl *ecc = &chip->ecc; 2554c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 25557ddb937fSAbhishek Sahu int cwperpage, bad_block_byte, ret; 2556c76b78d8SArchit Taneja bool wide_bus; 2557c76b78d8SArchit Taneja int ecc_mode = 1; 2558c76b78d8SArchit Taneja 2559320bdb5fSAbhishek Sahu /* controller only supports 512 bytes data steps */ 2560320bdb5fSAbhishek Sahu ecc->size = NANDC_STEP_SIZE; 2561c76b78d8SArchit Taneja wide_bus = chip->options & NAND_BUSWIDTH_16 ? 
true : false; 25627ddb937fSAbhishek Sahu cwperpage = mtd->writesize / NANDC_STEP_SIZE; 25637ddb937fSAbhishek Sahu 25647ddb937fSAbhishek Sahu /* 25657ddb937fSAbhishek Sahu * Each CW has 4 available OOB bytes which will be protected with ECC 25667ddb937fSAbhishek Sahu * so remaining bytes can be used for ECC. 25677ddb937fSAbhishek Sahu */ 25687ddb937fSAbhishek Sahu ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps, 25697ddb937fSAbhishek Sahu mtd->oobsize - (cwperpage * 4)); 25707ddb937fSAbhishek Sahu if (ret) { 25717ddb937fSAbhishek Sahu dev_err(nandc->dev, "No valid ECC settings possible\n"); 25727ddb937fSAbhishek Sahu return ret; 25737ddb937fSAbhishek Sahu } 2574c76b78d8SArchit Taneja 2575c76b78d8SArchit Taneja if (ecc->strength >= 8) { 2576c76b78d8SArchit Taneja /* 8 bit ECC defaults to BCH ECC on all platforms */ 2577c76b78d8SArchit Taneja host->bch_enabled = true; 2578c76b78d8SArchit Taneja ecc_mode = 1; 2579c76b78d8SArchit Taneja 2580c76b78d8SArchit Taneja if (wide_bus) { 2581c76b78d8SArchit Taneja host->ecc_bytes_hw = 14; 2582c76b78d8SArchit Taneja host->spare_bytes = 0; 2583c76b78d8SArchit Taneja host->bbm_size = 2; 2584c76b78d8SArchit Taneja } else { 2585c76b78d8SArchit Taneja host->ecc_bytes_hw = 13; 2586c76b78d8SArchit Taneja host->spare_bytes = 2; 2587c76b78d8SArchit Taneja host->bbm_size = 1; 2588c76b78d8SArchit Taneja } 2589c76b78d8SArchit Taneja } else { 2590c76b78d8SArchit Taneja /* 2591c76b78d8SArchit Taneja * if the controller supports BCH for 4 bit ECC, the controller 2592c76b78d8SArchit Taneja * uses lesser bytes for ECC. If RS is used, the ECC bytes is 2593c76b78d8SArchit Taneja * always 10 bytes 2594c76b78d8SArchit Taneja */ 259558f1f22aSAbhishek Sahu if (nandc->props->ecc_modes & ECC_BCH_4BIT) { 2596c76b78d8SArchit Taneja /* BCH */ 2597c76b78d8SArchit Taneja host->bch_enabled = true; 2598c76b78d8SArchit Taneja ecc_mode = 0; 2599c76b78d8SArchit Taneja 2600c76b78d8SArchit Taneja if (wide_bus) { 2601c76b78d8SArchit Taneja host->ecc_bytes_hw = 8; 2602c76b78d8SArchit Taneja host->spare_bytes = 2; 2603c76b78d8SArchit Taneja host->bbm_size = 2; 2604c76b78d8SArchit Taneja } else { 2605c76b78d8SArchit Taneja host->ecc_bytes_hw = 7; 2606c76b78d8SArchit Taneja host->spare_bytes = 4; 2607c76b78d8SArchit Taneja host->bbm_size = 1; 2608c76b78d8SArchit Taneja } 2609c76b78d8SArchit Taneja } else { 2610c76b78d8SArchit Taneja /* RS */ 2611c76b78d8SArchit Taneja host->ecc_bytes_hw = 10; 2612c76b78d8SArchit Taneja 2613c76b78d8SArchit Taneja if (wide_bus) { 2614c76b78d8SArchit Taneja host->spare_bytes = 0; 2615c76b78d8SArchit Taneja host->bbm_size = 2; 2616c76b78d8SArchit Taneja } else { 2617c76b78d8SArchit Taneja host->spare_bytes = 1; 2618c76b78d8SArchit Taneja host->bbm_size = 1; 2619c76b78d8SArchit Taneja } 2620c76b78d8SArchit Taneja } 2621c76b78d8SArchit Taneja } 2622c76b78d8SArchit Taneja 2623c76b78d8SArchit Taneja /* 2624c76b78d8SArchit Taneja * we consider ecc->bytes as the sum of all the non-data content in a 2625c76b78d8SArchit Taneja * step. 
It gives us a clean representation of the oob area (even if 2626c76b78d8SArchit Taneja * all the bytes aren't used for ECC).It is always 16 bytes for 8 bit 2627c76b78d8SArchit Taneja * ECC and 12 bytes for 4 bit ECC 2628c76b78d8SArchit Taneja */ 2629c76b78d8SArchit Taneja ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size; 2630c76b78d8SArchit Taneja 2631c76b78d8SArchit Taneja ecc->read_page = qcom_nandc_read_page; 2632c76b78d8SArchit Taneja ecc->read_page_raw = qcom_nandc_read_page_raw; 2633c76b78d8SArchit Taneja ecc->read_oob = qcom_nandc_read_oob; 2634c76b78d8SArchit Taneja ecc->write_page = qcom_nandc_write_page; 2635c76b78d8SArchit Taneja ecc->write_page_raw = qcom_nandc_write_page_raw; 2636c76b78d8SArchit Taneja ecc->write_oob = qcom_nandc_write_oob; 2637c76b78d8SArchit Taneja 2638bace41f8SMiquel Raynal ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; 2639c76b78d8SArchit Taneja 2640421e81c4SBoris Brezillon mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops); 2641c76b78d8SArchit Taneja 2642cb80f114SAbhishek Sahu nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage, 2643cb80f114SAbhishek Sahu cwperpage); 2644c76b78d8SArchit Taneja 2645c76b78d8SArchit Taneja /* 2646c76b78d8SArchit Taneja * DATA_UD_BYTES varies based on whether the read/write command protects 2647c76b78d8SArchit Taneja * spare data with ECC too. We protect spare data by default, so we set 2648c76b78d8SArchit Taneja * it to main + spare data, which are 512 and 4 bytes respectively. 2649c76b78d8SArchit Taneja */ 2650c76b78d8SArchit Taneja host->cw_data = 516; 2651c76b78d8SArchit Taneja 2652c76b78d8SArchit Taneja /* 2653c76b78d8SArchit Taneja * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes 2654c76b78d8SArchit Taneja * for 8 bit ECC 2655c76b78d8SArchit Taneja */ 2656c76b78d8SArchit Taneja host->cw_size = host->cw_data + ecc->bytes; 2657c76b78d8SArchit Taneja bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1; 2658c76b78d8SArchit Taneja 2659c76b78d8SArchit Taneja host->cfg0 = (cwperpage - 1) << CW_PER_PAGE 2660c76b78d8SArchit Taneja | host->cw_data << UD_SIZE_BYTES 2661c76b78d8SArchit Taneja | 0 << DISABLE_STATUS_AFTER_WRITE 2662c76b78d8SArchit Taneja | 5 << NUM_ADDR_CYCLES 2663c76b78d8SArchit Taneja | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS 2664c76b78d8SArchit Taneja | 0 << STATUS_BFR_READ 2665c76b78d8SArchit Taneja | 1 << SET_RD_MODE_AFTER_STATUS 2666c76b78d8SArchit Taneja | host->spare_bytes << SPARE_SIZE_BYTES; 2667c76b78d8SArchit Taneja 2668c76b78d8SArchit Taneja host->cfg1 = 7 << NAND_RECOVERY_CYCLES 2669c76b78d8SArchit Taneja | 0 << CS_ACTIVE_BSY 2670c76b78d8SArchit Taneja | bad_block_byte << BAD_BLOCK_BYTE_NUM 2671c76b78d8SArchit Taneja | 0 << BAD_BLOCK_IN_SPARE_AREA 2672c76b78d8SArchit Taneja | 2 << WR_RD_BSY_GAP 2673c76b78d8SArchit Taneja | wide_bus << WIDE_FLASH 2674c76b78d8SArchit Taneja | host->bch_enabled << ENABLE_BCH_ECC; 2675c76b78d8SArchit Taneja 2676c76b78d8SArchit Taneja host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE 2677c76b78d8SArchit Taneja | host->cw_size << UD_SIZE_BYTES 2678c76b78d8SArchit Taneja | 5 << NUM_ADDR_CYCLES 2679c76b78d8SArchit Taneja | 0 << SPARE_SIZE_BYTES; 2680c76b78d8SArchit Taneja 2681c76b78d8SArchit Taneja host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES 2682c76b78d8SArchit Taneja | 0 << CS_ACTIVE_BSY 2683c76b78d8SArchit Taneja | 17 << BAD_BLOCK_BYTE_NUM 2684c76b78d8SArchit Taneja | 1 << BAD_BLOCK_IN_SPARE_AREA 2685c76b78d8SArchit Taneja | 2 << WR_RD_BSY_GAP 2686c76b78d8SArchit Taneja | wide_bus << WIDE_FLASH 
2687c76b78d8SArchit Taneja | 1 << DEV0_CFG1_ECC_DISABLE; 2688c76b78d8SArchit Taneja 268910777de5SAbhishek Sahu host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE 2690c76b78d8SArchit Taneja | 0 << ECC_SW_RESET 2691c76b78d8SArchit Taneja | host->cw_data << ECC_NUM_DATA_BYTES 2692c76b78d8SArchit Taneja | 1 << ECC_FORCE_CLK_OPEN 2693c76b78d8SArchit Taneja | ecc_mode << ECC_MODE 2694c76b78d8SArchit Taneja | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH; 2695c76b78d8SArchit Taneja 2696*bfb34eceSMd Sadre Alam if (!nandc->props->qpic_v2) 2697c76b78d8SArchit Taneja host->ecc_buf_cfg = 0x203 << NUM_STEPS; 2698c76b78d8SArchit Taneja 2699c76b78d8SArchit Taneja host->clrflashstatus = FS_READY_BSY_N; 2700c76b78d8SArchit Taneja host->clrreadstatus = 0xc0; 2701a86b9c4fSAbhishek Sahu nandc->regs->erased_cw_detect_cfg_clr = 2702a86b9c4fSAbhishek Sahu cpu_to_le32(CLR_ERASED_PAGE_DET); 2703a86b9c4fSAbhishek Sahu nandc->regs->erased_cw_detect_cfg_set = 2704a86b9c4fSAbhishek Sahu cpu_to_le32(SET_ERASED_PAGE_DET); 2705c76b78d8SArchit Taneja 2706c76b78d8SArchit Taneja dev_dbg(nandc->dev, 2707c76b78d8SArchit Taneja "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n", 2708c76b78d8SArchit Taneja host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg, 2709c76b78d8SArchit Taneja host->cw_size, host->cw_data, ecc->strength, ecc->bytes, 2710c76b78d8SArchit Taneja cwperpage); 2711c76b78d8SArchit Taneja 2712c76b78d8SArchit Taneja return 0; 2713c76b78d8SArchit Taneja } 2714c76b78d8SArchit Taneja 27156a3cec64SMiquel Raynal static const struct nand_controller_ops qcom_nandc_ops = { 27166a3cec64SMiquel Raynal .attach_chip = qcom_nand_attach_chip, 27176a3cec64SMiquel Raynal }; 27186a3cec64SMiquel Raynal 271980c3012eSPeter Ujfalusi static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) 272080c3012eSPeter Ujfalusi { 272180c3012eSPeter Ujfalusi if (nandc->props->is_bam) { 272280c3012eSPeter Ujfalusi if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma)) 272380c3012eSPeter Ujfalusi dma_unmap_single(nandc->dev, nandc->reg_read_dma, 272480c3012eSPeter Ujfalusi MAX_REG_RD * 272580c3012eSPeter Ujfalusi sizeof(*nandc->reg_read_buf), 272680c3012eSPeter Ujfalusi DMA_FROM_DEVICE); 272780c3012eSPeter Ujfalusi 272880c3012eSPeter Ujfalusi if (nandc->tx_chan) 272980c3012eSPeter Ujfalusi dma_release_channel(nandc->tx_chan); 273080c3012eSPeter Ujfalusi 273180c3012eSPeter Ujfalusi if (nandc->rx_chan) 273280c3012eSPeter Ujfalusi dma_release_channel(nandc->rx_chan); 273380c3012eSPeter Ujfalusi 273480c3012eSPeter Ujfalusi if (nandc->cmd_chan) 273580c3012eSPeter Ujfalusi dma_release_channel(nandc->cmd_chan); 273680c3012eSPeter Ujfalusi } else { 273780c3012eSPeter Ujfalusi if (nandc->chan) 273880c3012eSPeter Ujfalusi dma_release_channel(nandc->chan); 273980c3012eSPeter Ujfalusi } 274080c3012eSPeter Ujfalusi } 274180c3012eSPeter Ujfalusi 2742c76b78d8SArchit Taneja static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) 2743c76b78d8SArchit Taneja { 2744c76b78d8SArchit Taneja int ret; 2745c76b78d8SArchit Taneja 2746c76b78d8SArchit Taneja ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32)); 2747c76b78d8SArchit Taneja if (ret) { 2748c76b78d8SArchit Taneja dev_err(nandc->dev, "failed to set DMA mask\n"); 2749c76b78d8SArchit Taneja return ret; 2750c76b78d8SArchit Taneja } 2751c76b78d8SArchit Taneja 2752c76b78d8SArchit Taneja /* 2753c76b78d8SArchit Taneja * we use the internal buffer for reading ONFI params, reading small 2754c76b78d8SArchit Taneja * data 
like ID and status, and performing read-copy-write operations 2755c76b78d8SArchit Taneja * when writing to a codeword partially. 532 is the maximum possible 2756c76b78d8SArchit Taneja * size of a codeword for our nand controller 2757c76b78d8SArchit Taneja */ 2758c76b78d8SArchit Taneja nandc->buf_size = 532; 2759c76b78d8SArchit Taneja 2760c76b78d8SArchit Taneja nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, 2761c76b78d8SArchit Taneja GFP_KERNEL); 2762c76b78d8SArchit Taneja if (!nandc->data_buffer) 2763c76b78d8SArchit Taneja return -ENOMEM; 2764c76b78d8SArchit Taneja 2765c76b78d8SArchit Taneja nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), 2766c76b78d8SArchit Taneja GFP_KERNEL); 2767c76b78d8SArchit Taneja if (!nandc->regs) 2768c76b78d8SArchit Taneja return -ENOMEM; 2769c76b78d8SArchit Taneja 2770a86854d0SKees Cook nandc->reg_read_buf = devm_kcalloc(nandc->dev, 2771a86854d0SKees Cook MAX_REG_RD, sizeof(*nandc->reg_read_buf), 2772c76b78d8SArchit Taneja GFP_KERNEL); 2773c76b78d8SArchit Taneja if (!nandc->reg_read_buf) 2774c76b78d8SArchit Taneja return -ENOMEM; 2775c76b78d8SArchit Taneja 2776497d7d85SAbhishek Sahu if (nandc->props->is_bam) { 27776192ff7aSAbhishek Sahu nandc->reg_read_dma = 27786192ff7aSAbhishek Sahu dma_map_single(nandc->dev, nandc->reg_read_buf, 27796192ff7aSAbhishek Sahu MAX_REG_RD * 27806192ff7aSAbhishek Sahu sizeof(*nandc->reg_read_buf), 27816192ff7aSAbhishek Sahu DMA_FROM_DEVICE); 27826192ff7aSAbhishek Sahu if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) { 27836192ff7aSAbhishek Sahu dev_err(nandc->dev, "failed to DMA MAP reg buffer\n"); 27846192ff7aSAbhishek Sahu return -EIO; 27856192ff7aSAbhishek Sahu } 27866192ff7aSAbhishek Sahu 278792f0f8efSPeter Ujfalusi nandc->tx_chan = dma_request_chan(nandc->dev, "tx"); 278892f0f8efSPeter Ujfalusi if (IS_ERR(nandc->tx_chan)) { 278992f0f8efSPeter Ujfalusi ret = PTR_ERR(nandc->tx_chan); 279092f0f8efSPeter Ujfalusi nandc->tx_chan = NULL; 279107eb014fSKrzysztof Kozlowski dev_err_probe(nandc->dev, ret, 279207eb014fSKrzysztof Kozlowski "tx DMA channel request failed\n"); 279380c3012eSPeter Ujfalusi goto unalloc; 2794497d7d85SAbhishek Sahu } 2795497d7d85SAbhishek Sahu 279692f0f8efSPeter Ujfalusi nandc->rx_chan = dma_request_chan(nandc->dev, "rx"); 279792f0f8efSPeter Ujfalusi if (IS_ERR(nandc->rx_chan)) { 279892f0f8efSPeter Ujfalusi ret = PTR_ERR(nandc->rx_chan); 279992f0f8efSPeter Ujfalusi nandc->rx_chan = NULL; 280007eb014fSKrzysztof Kozlowski dev_err_probe(nandc->dev, ret, 280107eb014fSKrzysztof Kozlowski "rx DMA channel request failed\n"); 280280c3012eSPeter Ujfalusi goto unalloc; 2803497d7d85SAbhishek Sahu } 2804497d7d85SAbhishek Sahu 280592f0f8efSPeter Ujfalusi nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd"); 280692f0f8efSPeter Ujfalusi if (IS_ERR(nandc->cmd_chan)) { 280792f0f8efSPeter Ujfalusi ret = PTR_ERR(nandc->cmd_chan); 280892f0f8efSPeter Ujfalusi nandc->cmd_chan = NULL; 280907eb014fSKrzysztof Kozlowski dev_err_probe(nandc->dev, ret, 281007eb014fSKrzysztof Kozlowski "cmd DMA channel request failed\n"); 281180c3012eSPeter Ujfalusi goto unalloc; 2812497d7d85SAbhishek Sahu } 2813cb80f114SAbhishek Sahu 2814cb80f114SAbhishek Sahu /* 2815cb80f114SAbhishek Sahu * Initially allocate BAM transaction to read ONFI param page. 
2816cb80f114SAbhishek Sahu * After detecting all the devices, this BAM transaction will 2817cb80f114SAbhishek Sahu * be freed and the next BAM transaction will be allocated with 2818cb80f114SAbhishek Sahu * maximum codeword size 2819cb80f114SAbhishek Sahu */ 2820cb80f114SAbhishek Sahu nandc->max_cwperpage = 1; 2821cb80f114SAbhishek Sahu nandc->bam_txn = alloc_bam_transaction(nandc); 2822cb80f114SAbhishek Sahu if (!nandc->bam_txn) { 2823cb80f114SAbhishek Sahu dev_err(nandc->dev, 2824cb80f114SAbhishek Sahu "failed to allocate bam transaction\n"); 282580c3012eSPeter Ujfalusi ret = -ENOMEM; 282680c3012eSPeter Ujfalusi goto unalloc; 2827cb80f114SAbhishek Sahu } 2828497d7d85SAbhishek Sahu } else { 282992f0f8efSPeter Ujfalusi nandc->chan = dma_request_chan(nandc->dev, "rxtx"); 283092f0f8efSPeter Ujfalusi if (IS_ERR(nandc->chan)) { 283192f0f8efSPeter Ujfalusi ret = PTR_ERR(nandc->chan); 283292f0f8efSPeter Ujfalusi nandc->chan = NULL; 283307eb014fSKrzysztof Kozlowski dev_err_probe(nandc->dev, ret, 283407eb014fSKrzysztof Kozlowski "rxtx DMA channel request failed\n"); 283592f0f8efSPeter Ujfalusi return ret; 2836c76b78d8SArchit Taneja } 2837497d7d85SAbhishek Sahu } 2838c76b78d8SArchit Taneja 2839c76b78d8SArchit Taneja INIT_LIST_HEAD(&nandc->desc_list); 2840c76b78d8SArchit Taneja INIT_LIST_HEAD(&nandc->host_list); 2841c76b78d8SArchit Taneja 28427da45139SMiquel Raynal nand_controller_init(&nandc->controller); 28436a3cec64SMiquel Raynal nandc->controller.ops = &qcom_nandc_ops; 2844c76b78d8SArchit Taneja 2845c76b78d8SArchit Taneja return 0; 284680c3012eSPeter Ujfalusi unalloc: 284780c3012eSPeter Ujfalusi qcom_nandc_unalloc(nandc); 284880c3012eSPeter Ujfalusi return ret; 2849497d7d85SAbhishek Sahu } 2850c76b78d8SArchit Taneja 2851c76b78d8SArchit Taneja /* one time setup of a few nand controller registers */ 2852c76b78d8SArchit Taneja static int qcom_nandc_setup(struct qcom_nand_controller *nandc) 2853c76b78d8SArchit Taneja { 28549d43f915SAbhishek Sahu u32 nand_ctrl; 28559d43f915SAbhishek Sahu 2856c76b78d8SArchit Taneja /* kill onenand */ 2857443440ccSSivaprakash Murugesan if (!nandc->props->is_qpic) 2858c76b78d8SArchit Taneja nandc_write(nandc, SFLASHC_BURST_CFG, 0); 2859b1209582SManivannan Sadhasivam 2860b1209582SManivannan Sadhasivam if (!nandc->props->qpic_v2) 2861cc409b9aSAbhishek Sahu nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), 2862cc409b9aSAbhishek Sahu NAND_DEV_CMD_VLD_VAL); 2863c76b78d8SArchit Taneja 28649d43f915SAbhishek Sahu /* enable ADM or BAM DMA */ 28659d43f915SAbhishek Sahu if (nandc->props->is_bam) { 28669d43f915SAbhishek Sahu nand_ctrl = nandc_read(nandc, NAND_CTRL); 2867cb272395SSivaprakash Murugesan 2868cb272395SSivaprakash Murugesan /* 2869cb272395SSivaprakash Murugesan * NAND_CTRL is an operational register, and CPU 2870cb272395SSivaprakash Murugesan * access to operational registers is read only 2871cb272395SSivaprakash Murugesan * in BAM mode. So update the NAND_CTRL register 2872cb272395SSivaprakash Murugesan * only if it is not in BAM mode. 
In most cases BAM 2873cb272395SSivaprakash Murugesan * mode will be enabled in bootloader 2874cb272395SSivaprakash Murugesan */ 2875cb272395SSivaprakash Murugesan if (!(nand_ctrl & BAM_MODE_EN)) 28769d43f915SAbhishek Sahu nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN); 28779d43f915SAbhishek Sahu } else { 2878c76b78d8SArchit Taneja nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); 28799d43f915SAbhishek Sahu } 2880c76b78d8SArchit Taneja 2881c76b78d8SArchit Taneja /* save the original values of these registers */ 2882b1209582SManivannan Sadhasivam if (!nandc->props->qpic_v2) { 2883cc409b9aSAbhishek Sahu nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); 2884d8a9b320SAbhishek Sahu nandc->vld = NAND_DEV_CMD_VLD_VAL; 2885b1209582SManivannan Sadhasivam } 2886c76b78d8SArchit Taneja 2887c76b78d8SArchit Taneja return 0; 2888c76b78d8SArchit Taneja } 2889c76b78d8SArchit Taneja 289021020becSBaruch Siach static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL }; 289182bfd11fSManivannan Sadhasivam 28926a3cec64SMiquel Raynal static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, 2893c76b78d8SArchit Taneja struct qcom_nand_host *host, 2894c76b78d8SArchit Taneja struct device_node *dn) 2895c76b78d8SArchit Taneja { 2896c76b78d8SArchit Taneja struct nand_chip *chip = &host->chip; 2897c76b78d8SArchit Taneja struct mtd_info *mtd = nand_to_mtd(chip); 2898c76b78d8SArchit Taneja struct device *dev = nandc->dev; 2899c76b78d8SArchit Taneja int ret; 2900c76b78d8SArchit Taneja 2901c76b78d8SArchit Taneja ret = of_property_read_u32(dn, "reg", &host->cs); 2902c76b78d8SArchit Taneja if (ret) { 2903c76b78d8SArchit Taneja dev_err(dev, "can't get chip-select\n"); 2904c76b78d8SArchit Taneja return -ENXIO; 2905c76b78d8SArchit Taneja } 2906c76b78d8SArchit Taneja 2907c76b78d8SArchit Taneja nand_set_flash_node(chip, dn); 2908c76b78d8SArchit Taneja mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); 2909069f0534SFabio Estevam if (!mtd->name) 2910069f0534SFabio Estevam return -ENOMEM; 2911069f0534SFabio Estevam 2912c76b78d8SArchit Taneja mtd->owner = THIS_MODULE; 2913c76b78d8SArchit Taneja mtd->dev.parent = dev; 2914c76b78d8SArchit Taneja 2915bf6065c6SBoris Brezillon chip->legacy.cmdfunc = qcom_nandc_command; 29167d6c37e9SBoris Brezillon chip->legacy.select_chip = qcom_nandc_select_chip; 2917716bbbabSBoris Brezillon chip->legacy.read_byte = qcom_nandc_read_byte; 2918716bbbabSBoris Brezillon chip->legacy.read_buf = qcom_nandc_read_buf; 2919716bbbabSBoris Brezillon chip->legacy.write_buf = qcom_nandc_write_buf; 292045240367SBoris Brezillon chip->legacy.set_features = nand_get_set_features_notsupp; 292145240367SBoris Brezillon chip->legacy.get_features = nand_get_set_features_notsupp; 2922c76b78d8SArchit Taneja 2923c76b78d8SArchit Taneja /* 2924c76b78d8SArchit Taneja * the bad block marker is readable only when we read the last codeword 2925c76b78d8SArchit Taneja * of a page with ECC disabled. currently, the nand_base and nand_bbt 2926c76b78d8SArchit Taneja * helpers don't allow us to read BB from a nand chip with ECC 2927c76b78d8SArchit Taneja * disabled (MTD_OPS_PLACE_OOB is set by default). 
use the block_bad 2928c76b78d8SArchit Taneja * and block_markbad helpers until we permanently switch to using 2929c76b78d8SArchit Taneja * MTD_OPS_RAW for all drivers (with the help of badblockbits) 2930c76b78d8SArchit Taneja */ 2931cdc784c7SBoris Brezillon chip->legacy.block_bad = qcom_nandc_block_bad; 2932cdc784c7SBoris Brezillon chip->legacy.block_markbad = qcom_nandc_block_markbad; 2933c76b78d8SArchit Taneja 2934c76b78d8SArchit Taneja chip->controller = &nandc->controller; 2935ce8148d7SMiquel Raynal chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | 2936c76b78d8SArchit Taneja NAND_SKIP_BBTSCAN; 2937c76b78d8SArchit Taneja 2938c76b78d8SArchit Taneja /* set up initial status value */ 2939c76b78d8SArchit Taneja host->status = NAND_STATUS_READY | NAND_STATUS_WP; 2940c76b78d8SArchit Taneja 294100ad378fSBoris Brezillon ret = nand_scan(chip, 1); 2942c76b78d8SArchit Taneja if (ret) 2943c76b78d8SArchit Taneja return ret; 2944c76b78d8SArchit Taneja 294581d9bdf5SChristian Lamparter if (nandc->props->is_bam) { 294681d9bdf5SChristian Lamparter free_bam_transaction(nandc); 294781d9bdf5SChristian Lamparter nandc->bam_txn = alloc_bam_transaction(nandc); 294881d9bdf5SChristian Lamparter if (!nandc->bam_txn) { 294981d9bdf5SChristian Lamparter dev_err(nandc->dev, 295081d9bdf5SChristian Lamparter "failed to allocate bam transaction\n"); 2951ab2c8d3eSManivannan Sadhasivam nand_cleanup(chip); 295281d9bdf5SChristian Lamparter return -ENOMEM; 295381d9bdf5SChristian Lamparter } 295481d9bdf5SChristian Lamparter } 295581d9bdf5SChristian Lamparter 295682bfd11fSManivannan Sadhasivam ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0); 295789f5127cSAbhishek Sahu if (ret) 29586a3cec64SMiquel Raynal nand_cleanup(chip); 295989f5127cSAbhishek Sahu 296089f5127cSAbhishek Sahu return ret; 296189f5127cSAbhishek Sahu } 296289f5127cSAbhishek Sahu 296389f5127cSAbhishek Sahu static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) 296489f5127cSAbhishek Sahu { 296589f5127cSAbhishek Sahu struct device *dev = nandc->dev; 296689f5127cSAbhishek Sahu struct device_node *dn = dev->of_node, *child; 29676a3cec64SMiquel Raynal struct qcom_nand_host *host; 296855fbb9baSManivannan Sadhasivam int ret = -ENODEV; 296989f5127cSAbhishek Sahu 29706a3cec64SMiquel Raynal for_each_available_child_of_node(dn, child) { 29716a3cec64SMiquel Raynal host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 29726a3cec64SMiquel Raynal if (!host) { 29736a3cec64SMiquel Raynal of_node_put(child); 29746a3cec64SMiquel Raynal return -ENOMEM; 297589f5127cSAbhishek Sahu } 29766a3cec64SMiquel Raynal 29776a3cec64SMiquel Raynal ret = qcom_nand_host_init_and_register(nandc, host, child); 29786a3cec64SMiquel Raynal if (ret) { 29796a3cec64SMiquel Raynal devm_kfree(dev, host); 29806a3cec64SMiquel Raynal continue; 29816a3cec64SMiquel Raynal } 29826a3cec64SMiquel Raynal 29836a3cec64SMiquel Raynal list_add_tail(&host->node, &nandc->host_list); 298489f5127cSAbhishek Sahu } 298589f5127cSAbhishek Sahu 298655fbb9baSManivannan Sadhasivam return ret; 2987c76b78d8SArchit Taneja } 2988c76b78d8SArchit Taneja 2989c76b78d8SArchit Taneja /* parse custom DT properties here */ 2990c76b78d8SArchit Taneja static int qcom_nandc_parse_dt(struct platform_device *pdev) 2991c76b78d8SArchit Taneja { 2992c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = platform_get_drvdata(pdev); 2993c76b78d8SArchit Taneja struct device_node *np = nandc->dev->of_node; 2994c76b78d8SArchit Taneja int ret; 2995c76b78d8SArchit Taneja 2996497d7d85SAbhishek Sahu if 
(!nandc->props->is_bam) { 2997497d7d85SAbhishek Sahu ret = of_property_read_u32(np, "qcom,cmd-crci", 2998497d7d85SAbhishek Sahu &nandc->cmd_crci); 2999c76b78d8SArchit Taneja if (ret) { 3000c76b78d8SArchit Taneja dev_err(nandc->dev, "command CRCI unspecified\n"); 3001c76b78d8SArchit Taneja return ret; 3002c76b78d8SArchit Taneja } 3003c76b78d8SArchit Taneja 3004497d7d85SAbhishek Sahu ret = of_property_read_u32(np, "qcom,data-crci", 3005497d7d85SAbhishek Sahu &nandc->data_crci); 3006c76b78d8SArchit Taneja if (ret) { 3007c76b78d8SArchit Taneja dev_err(nandc->dev, "data CRCI unspecified\n"); 3008c76b78d8SArchit Taneja return ret; 3009c76b78d8SArchit Taneja } 3010497d7d85SAbhishek Sahu } 3011c76b78d8SArchit Taneja 3012c76b78d8SArchit Taneja return 0; 3013c76b78d8SArchit Taneja } 3014c76b78d8SArchit Taneja 3015c76b78d8SArchit Taneja static int qcom_nandc_probe(struct platform_device *pdev) 3016c76b78d8SArchit Taneja { 3017c76b78d8SArchit Taneja struct qcom_nand_controller *nandc; 3018c76b78d8SArchit Taneja const void *dev_data; 3019c76b78d8SArchit Taneja struct device *dev = &pdev->dev; 3020c76b78d8SArchit Taneja struct resource *res; 3021c76b78d8SArchit Taneja int ret; 3022c76b78d8SArchit Taneja 3023c76b78d8SArchit Taneja nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL); 3024c76b78d8SArchit Taneja if (!nandc) 3025c76b78d8SArchit Taneja return -ENOMEM; 3026c76b78d8SArchit Taneja 3027c76b78d8SArchit Taneja platform_set_drvdata(pdev, nandc); 3028c76b78d8SArchit Taneja nandc->dev = dev; 3029c76b78d8SArchit Taneja 3030c76b78d8SArchit Taneja dev_data = of_device_get_match_data(dev); 3031c76b78d8SArchit Taneja if (!dev_data) { 3032c76b78d8SArchit Taneja dev_err(&pdev->dev, "failed to get device data\n"); 3033c76b78d8SArchit Taneja return -ENODEV; 3034c76b78d8SArchit Taneja } 3035c76b78d8SArchit Taneja 303658f1f22aSAbhishek Sahu nandc->props = dev_data; 3037c76b78d8SArchit Taneja 3038c76b78d8SArchit Taneja nandc->core_clk = devm_clk_get(dev, "core"); 3039c76b78d8SArchit Taneja if (IS_ERR(nandc->core_clk)) 3040c76b78d8SArchit Taneja return PTR_ERR(nandc->core_clk); 3041c76b78d8SArchit Taneja 3042c76b78d8SArchit Taneja nandc->aon_clk = devm_clk_get(dev, "aon"); 3043c76b78d8SArchit Taneja if (IS_ERR(nandc->aon_clk)) 3044c76b78d8SArchit Taneja return PTR_ERR(nandc->aon_clk); 3045c76b78d8SArchit Taneja 3046c76b78d8SArchit Taneja ret = qcom_nandc_parse_dt(pdev); 3047c76b78d8SArchit Taneja if (ret) 3048c76b78d8SArchit Taneja return ret; 3049c76b78d8SArchit Taneja 30507330fc50SArnd Bergmann res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 30517330fc50SArnd Bergmann nandc->base = devm_ioremap_resource(dev, res); 30527330fc50SArnd Bergmann if (IS_ERR(nandc->base)) 30537330fc50SArnd Bergmann return PTR_ERR(nandc->base); 30547330fc50SArnd Bergmann 30557330fc50SArnd Bergmann nandc->base_phys = res->start; 30567330fc50SArnd Bergmann nandc->base_dma = dma_map_resource(dev, res->start, 30577330fc50SArnd Bergmann resource_size(res), 30587330fc50SArnd Bergmann DMA_BIDIRECTIONAL, 0); 305932cbc7cbSManivannan Sadhasivam if (dma_mapping_error(dev, nandc->base_dma)) 30607330fc50SArnd Bergmann return -ENXIO; 30617330fc50SArnd Bergmann 3062c76b78d8SArchit Taneja ret = qcom_nandc_alloc(nandc); 3063c76b78d8SArchit Taneja if (ret) 30647330fc50SArnd Bergmann goto err_nandc_alloc; 3065c76b78d8SArchit Taneja 3066c76b78d8SArchit Taneja ret = clk_prepare_enable(nandc->core_clk); 3067c76b78d8SArchit Taneja if (ret) 3068c76b78d8SArchit Taneja goto err_core_clk; 3069c76b78d8SArchit Taneja 3070c76b78d8SArchit Taneja ret = 
clk_prepare_enable(nandc->aon_clk); 3071c76b78d8SArchit Taneja if (ret) 3072c76b78d8SArchit Taneja goto err_aon_clk; 3073c76b78d8SArchit Taneja 3074c76b78d8SArchit Taneja ret = qcom_nandc_setup(nandc); 3075c76b78d8SArchit Taneja if (ret) 3076c76b78d8SArchit Taneja goto err_setup; 3077c76b78d8SArchit Taneja 307889f5127cSAbhishek Sahu ret = qcom_probe_nand_devices(nandc); 307989f5127cSAbhishek Sahu if (ret) 308089f5127cSAbhishek Sahu goto err_setup; 3081c76b78d8SArchit Taneja 3082c76b78d8SArchit Taneja return 0; 3083c76b78d8SArchit Taneja 3084c76b78d8SArchit Taneja err_setup: 3085c76b78d8SArchit Taneja clk_disable_unprepare(nandc->aon_clk); 3086c76b78d8SArchit Taneja err_aon_clk: 3087c76b78d8SArchit Taneja clk_disable_unprepare(nandc->core_clk); 3088c76b78d8SArchit Taneja err_core_clk: 3089c76b78d8SArchit Taneja qcom_nandc_unalloc(nandc); 30907330fc50SArnd Bergmann err_nandc_alloc: 30917330fc50SArnd Bergmann dma_unmap_resource(dev, res->start, resource_size(res), 30927330fc50SArnd Bergmann DMA_BIDIRECTIONAL, 0); 3093c76b78d8SArchit Taneja 3094c76b78d8SArchit Taneja return ret; 3095c76b78d8SArchit Taneja } 3096c76b78d8SArchit Taneja 3097c76b78d8SArchit Taneja static int qcom_nandc_remove(struct platform_device *pdev) 3098c76b78d8SArchit Taneja { 3099c76b78d8SArchit Taneja struct qcom_nand_controller *nandc = platform_get_drvdata(pdev); 31007330fc50SArnd Bergmann struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3101c76b78d8SArchit Taneja struct qcom_nand_host *host; 31020a2bc991SMiquel Raynal struct nand_chip *chip; 31030a2bc991SMiquel Raynal int ret; 3104c76b78d8SArchit Taneja 31050a2bc991SMiquel Raynal list_for_each_entry(host, &nandc->host_list, node) { 31060a2bc991SMiquel Raynal chip = &host->chip; 31070a2bc991SMiquel Raynal ret = mtd_device_unregister(nand_to_mtd(chip)); 31080a2bc991SMiquel Raynal WARN_ON(ret); 31090a2bc991SMiquel Raynal nand_cleanup(chip); 31100a2bc991SMiquel Raynal } 31117330fc50SArnd Bergmann 3112c76b78d8SArchit Taneja qcom_nandc_unalloc(nandc); 3113c76b78d8SArchit Taneja 3114c76b78d8SArchit Taneja clk_disable_unprepare(nandc->aon_clk); 3115c76b78d8SArchit Taneja clk_disable_unprepare(nandc->core_clk); 3116c76b78d8SArchit Taneja 31177330fc50SArnd Bergmann dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res), 31187330fc50SArnd Bergmann DMA_BIDIRECTIONAL, 0); 31197330fc50SArnd Bergmann 3120c76b78d8SArchit Taneja return 0; 3121c76b78d8SArchit Taneja } 3122c76b78d8SArchit Taneja 312358f1f22aSAbhishek Sahu static const struct qcom_nandc_props ipq806x_nandc_props = { 312458f1f22aSAbhishek Sahu .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT), 31258c5d5d6aSAbhishek Sahu .is_bam = false, 3126cc409b9aSAbhishek Sahu .dev_cmd_reg_start = 0x0, 312758f1f22aSAbhishek Sahu }; 3128c76b78d8SArchit Taneja 3129a0637834SAbhishek Sahu static const struct qcom_nandc_props ipq4019_nandc_props = { 3130a0637834SAbhishek Sahu .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), 3131a0637834SAbhishek Sahu .is_bam = true, 3132443440ccSSivaprakash Murugesan .is_qpic = true, 3133a0637834SAbhishek Sahu .dev_cmd_reg_start = 0x0, 3134a0637834SAbhishek Sahu }; 3135a0637834SAbhishek Sahu 3136dce84760SAbhishek Sahu static const struct qcom_nandc_props ipq8074_nandc_props = { 3137dce84760SAbhishek Sahu .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), 3138dce84760SAbhishek Sahu .is_bam = true, 3139443440ccSSivaprakash Murugesan .is_qpic = true, 3140dce84760SAbhishek Sahu .dev_cmd_reg_start = 0x7000, 3141dce84760SAbhishek Sahu }; 3142dce84760SAbhishek Sahu 3143b1209582SManivannan Sadhasivam 
static const struct qcom_nandc_props sdx55_nandc_props = { 3144b1209582SManivannan Sadhasivam .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), 3145b1209582SManivannan Sadhasivam .is_bam = true, 3146b1209582SManivannan Sadhasivam .is_qpic = true, 3147b1209582SManivannan Sadhasivam .qpic_v2 = true, 3148b1209582SManivannan Sadhasivam .dev_cmd_reg_start = 0x7000, 3149b1209582SManivannan Sadhasivam }; 3150b1209582SManivannan Sadhasivam 3151c76b78d8SArchit Taneja /* 3152c76b78d8SArchit Taneja * data will hold a struct pointer containing more differences once we support 3153c76b78d8SArchit Taneja * more controller variants 3154c76b78d8SArchit Taneja */ 3155c76b78d8SArchit Taneja static const struct of_device_id qcom_nandc_of_match[] = { 315658f1f22aSAbhishek Sahu { 315758f1f22aSAbhishek Sahu .compatible = "qcom,ipq806x-nand", 315858f1f22aSAbhishek Sahu .data = &ipq806x_nandc_props, 3159c76b78d8SArchit Taneja }, 3160a0637834SAbhishek Sahu { 3161a0637834SAbhishek Sahu .compatible = "qcom,ipq4019-nand", 3162a0637834SAbhishek Sahu .data = &ipq4019_nandc_props, 3163a0637834SAbhishek Sahu }, 3164dce84760SAbhishek Sahu { 316562858625SKathiravan T .compatible = "qcom,ipq6018-nand", 316662858625SKathiravan T .data = &ipq8074_nandc_props, 316762858625SKathiravan T }, 316862858625SKathiravan T { 3169dce84760SAbhishek Sahu .compatible = "qcom,ipq8074-nand", 3170dce84760SAbhishek Sahu .data = &ipq8074_nandc_props, 3171dce84760SAbhishek Sahu }, 3172b1209582SManivannan Sadhasivam { 3173b1209582SManivannan Sadhasivam .compatible = "qcom,sdx55-nand", 3174b1209582SManivannan Sadhasivam .data = &sdx55_nandc_props, 3175b1209582SManivannan Sadhasivam }, 3176c76b78d8SArchit Taneja {} 3177c76b78d8SArchit Taneja }; 3178c76b78d8SArchit Taneja MODULE_DEVICE_TABLE(of, qcom_nandc_of_match); 3179c76b78d8SArchit Taneja 3180c76b78d8SArchit Taneja static struct platform_driver qcom_nandc_driver = { 3181c76b78d8SArchit Taneja .driver = { 3182c76b78d8SArchit Taneja .name = "qcom-nandc", 3183c76b78d8SArchit Taneja .of_match_table = qcom_nandc_of_match, 3184c76b78d8SArchit Taneja }, 3185c76b78d8SArchit Taneja .probe = qcom_nandc_probe, 3186c76b78d8SArchit Taneja .remove = qcom_nandc_remove, 3187c76b78d8SArchit Taneja }; 3188c76b78d8SArchit Taneja module_platform_driver(qcom_nandc_driver); 3189c76b78d8SArchit Taneja 3190c76b78d8SArchit Taneja MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>"); 3191c76b78d8SArchit Taneja MODULE_DESCRIPTION("Qualcomm NAND Controller driver"); 3192c76b78d8SArchit Taneja MODULE_LICENSE("GPL v2"); 3193