xref: /linux/drivers/mtd/nand/raw/qcom_nandc.c (revision 2f610386736d9e5dc69fa06374138f9712690921)
1c76b78d8SArchit Taneja /*
2c76b78d8SArchit Taneja  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3c76b78d8SArchit Taneja  *
4c76b78d8SArchit Taneja  * This software is licensed under the terms of the GNU General Public
5c76b78d8SArchit Taneja  * License version 2, as published by the Free Software Foundation, and
6c76b78d8SArchit Taneja  * may be copied, distributed, and modified under those terms.
7c76b78d8SArchit Taneja  *
8c76b78d8SArchit Taneja  * This program is distributed in the hope that it will be useful,
9c76b78d8SArchit Taneja  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10c76b78d8SArchit Taneja  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11c76b78d8SArchit Taneja  * GNU General Public License for more details.
12c76b78d8SArchit Taneja  */
13c76b78d8SArchit Taneja 
14c76b78d8SArchit Taneja #include <linux/clk.h>
15c76b78d8SArchit Taneja #include <linux/slab.h>
16c76b78d8SArchit Taneja #include <linux/bitops.h>
17c76b78d8SArchit Taneja #include <linux/dma-mapping.h>
18c76b78d8SArchit Taneja #include <linux/dmaengine.h>
19c76b78d8SArchit Taneja #include <linux/module.h>
20d4092d76SBoris Brezillon #include <linux/mtd/rawnand.h>
21c76b78d8SArchit Taneja #include <linux/mtd/partitions.h>
22c76b78d8SArchit Taneja #include <linux/of.h>
23c76b78d8SArchit Taneja #include <linux/of_device.h>
24c76b78d8SArchit Taneja #include <linux/delay.h>
258c4cdce8SAbhishek Sahu #include <linux/dma/qcom_bam_dma.h>
26ea8c64acSChristoph Hellwig #include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
27c76b78d8SArchit Taneja 
28c76b78d8SArchit Taneja /* NANDc reg offsets */
29c76b78d8SArchit Taneja #define	NAND_FLASH_CMD			0x00
30c76b78d8SArchit Taneja #define	NAND_ADDR0			0x04
31c76b78d8SArchit Taneja #define	NAND_ADDR1			0x08
32c76b78d8SArchit Taneja #define	NAND_FLASH_CHIP_SELECT		0x0c
33c76b78d8SArchit Taneja #define	NAND_EXEC_CMD			0x10
34c76b78d8SArchit Taneja #define	NAND_FLASH_STATUS		0x14
35c76b78d8SArchit Taneja #define	NAND_BUFFER_STATUS		0x18
36c76b78d8SArchit Taneja #define	NAND_DEV0_CFG0			0x20
37c76b78d8SArchit Taneja #define	NAND_DEV0_CFG1			0x24
38c76b78d8SArchit Taneja #define	NAND_DEV0_ECC_CFG		0x28
39c76b78d8SArchit Taneja #define	NAND_DEV1_ECC_CFG		0x2c
40c76b78d8SArchit Taneja #define	NAND_DEV1_CFG0			0x30
41c76b78d8SArchit Taneja #define	NAND_DEV1_CFG1			0x34
42c76b78d8SArchit Taneja #define	NAND_READ_ID			0x40
43c76b78d8SArchit Taneja #define	NAND_READ_STATUS		0x44
44c76b78d8SArchit Taneja #define	NAND_DEV_CMD0			0xa0
45c76b78d8SArchit Taneja #define	NAND_DEV_CMD1			0xa4
46c76b78d8SArchit Taneja #define	NAND_DEV_CMD2			0xa8
47c76b78d8SArchit Taneja #define	NAND_DEV_CMD_VLD		0xac
48c76b78d8SArchit Taneja #define	SFLASHC_BURST_CFG		0xe0
49c76b78d8SArchit Taneja #define	NAND_ERASED_CW_DETECT_CFG	0xe8
50c76b78d8SArchit Taneja #define	NAND_ERASED_CW_DETECT_STATUS	0xec
51c76b78d8SArchit Taneja #define	NAND_EBI2_ECC_BUF_CFG		0xf0
52c76b78d8SArchit Taneja #define	FLASH_BUF_ACC			0x100
53c76b78d8SArchit Taneja 
54c76b78d8SArchit Taneja #define	NAND_CTRL			0xf00
55c76b78d8SArchit Taneja #define	NAND_VERSION			0xf08
56c76b78d8SArchit Taneja #define	NAND_READ_LOCATION_0		0xf20
57c76b78d8SArchit Taneja #define	NAND_READ_LOCATION_1		0xf24
5891af95c1SAbhishek Sahu #define	NAND_READ_LOCATION_2		0xf28
5991af95c1SAbhishek Sahu #define	NAND_READ_LOCATION_3		0xf2c
60c76b78d8SArchit Taneja 
61c76b78d8SArchit Taneja /* dummy register offsets, used by write_reg_dma */
62c76b78d8SArchit Taneja #define	NAND_DEV_CMD1_RESTORE		0xdead
63c76b78d8SArchit Taneja #define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
64c76b78d8SArchit Taneja 
65c76b78d8SArchit Taneja /* NAND_FLASH_CMD bits */
66c76b78d8SArchit Taneja #define	PAGE_ACC			BIT(4)
67c76b78d8SArchit Taneja #define	LAST_PAGE			BIT(5)
68c76b78d8SArchit Taneja 
69c76b78d8SArchit Taneja /* NAND_FLASH_CHIP_SELECT bits */
70c76b78d8SArchit Taneja #define	NAND_DEV_SEL			0
71c76b78d8SArchit Taneja #define	DM_EN				BIT(2)
72c76b78d8SArchit Taneja 
73c76b78d8SArchit Taneja /* NAND_FLASH_STATUS bits */
74c76b78d8SArchit Taneja #define	FS_OP_ERR			BIT(4)
75c76b78d8SArchit Taneja #define	FS_READY_BSY_N			BIT(5)
76c76b78d8SArchit Taneja #define	FS_MPU_ERR			BIT(8)
77c76b78d8SArchit Taneja #define	FS_DEVICE_STS_ERR		BIT(16)
78c76b78d8SArchit Taneja #define	FS_DEVICE_WP			BIT(23)
79c76b78d8SArchit Taneja 
80c76b78d8SArchit Taneja /* NAND_BUFFER_STATUS bits */
81c76b78d8SArchit Taneja #define	BS_UNCORRECTABLE_BIT		BIT(8)
82c76b78d8SArchit Taneja #define	BS_CORRECTABLE_ERR_MSK		0x1f
83c76b78d8SArchit Taneja 
84c76b78d8SArchit Taneja /* NAND_DEVn_CFG0 bits */
85c76b78d8SArchit Taneja #define	DISABLE_STATUS_AFTER_WRITE	4
86c76b78d8SArchit Taneja #define	CW_PER_PAGE			6
87c76b78d8SArchit Taneja #define	UD_SIZE_BYTES			9
88c76b78d8SArchit Taneja #define	ECC_PARITY_SIZE_BYTES_RS	19
89c76b78d8SArchit Taneja #define	SPARE_SIZE_BYTES		23
90c76b78d8SArchit Taneja #define	NUM_ADDR_CYCLES			27
91c76b78d8SArchit Taneja #define	STATUS_BFR_READ			30
92c76b78d8SArchit Taneja #define	SET_RD_MODE_AFTER_STATUS	31
93c76b78d8SArchit Taneja 
94c76b78d8SArchit Taneja /* NAND_DEVn_CFG0 bits */
95c76b78d8SArchit Taneja #define	DEV0_CFG1_ECC_DISABLE		0
96c76b78d8SArchit Taneja #define	WIDE_FLASH			1
97c76b78d8SArchit Taneja #define	NAND_RECOVERY_CYCLES		2
98c76b78d8SArchit Taneja #define	CS_ACTIVE_BSY			5
99c76b78d8SArchit Taneja #define	BAD_BLOCK_BYTE_NUM		6
100c76b78d8SArchit Taneja #define	BAD_BLOCK_IN_SPARE_AREA		16
101c76b78d8SArchit Taneja #define	WR_RD_BSY_GAP			17
102c76b78d8SArchit Taneja #define	ENABLE_BCH_ECC			27
103c76b78d8SArchit Taneja 
104c76b78d8SArchit Taneja /* NAND_DEV0_ECC_CFG bits */
105c76b78d8SArchit Taneja #define	ECC_CFG_ECC_DISABLE		0
106c76b78d8SArchit Taneja #define	ECC_SW_RESET			1
107c76b78d8SArchit Taneja #define	ECC_MODE			4
108c76b78d8SArchit Taneja #define	ECC_PARITY_SIZE_BYTES_BCH	8
109c76b78d8SArchit Taneja #define	ECC_NUM_DATA_BYTES		16
110c76b78d8SArchit Taneja #define	ECC_FORCE_CLK_OPEN		30
111c76b78d8SArchit Taneja 
112c76b78d8SArchit Taneja /* NAND_DEV_CMD1 bits */
113c76b78d8SArchit Taneja #define	READ_ADDR			0
114c76b78d8SArchit Taneja 
115c76b78d8SArchit Taneja /* NAND_DEV_CMD_VLD bits */
116d8a9b320SAbhishek Sahu #define	READ_START_VLD			BIT(0)
117d8a9b320SAbhishek Sahu #define	READ_STOP_VLD			BIT(1)
118d8a9b320SAbhishek Sahu #define	WRITE_START_VLD			BIT(2)
119d8a9b320SAbhishek Sahu #define	ERASE_START_VLD			BIT(3)
120d8a9b320SAbhishek Sahu #define	SEQ_READ_START_VLD		BIT(4)
121c76b78d8SArchit Taneja 
122c76b78d8SArchit Taneja /* NAND_EBI2_ECC_BUF_CFG bits */
123c76b78d8SArchit Taneja #define	NUM_STEPS			0
124c76b78d8SArchit Taneja 
125c76b78d8SArchit Taneja /* NAND_ERASED_CW_DETECT_CFG bits */
126c76b78d8SArchit Taneja #define	ERASED_CW_ECC_MASK		1
127c76b78d8SArchit Taneja #define	AUTO_DETECT_RES			0
128c76b78d8SArchit Taneja #define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
129c76b78d8SArchit Taneja #define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
130c76b78d8SArchit Taneja #define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
131c76b78d8SArchit Taneja #define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
132c76b78d8SArchit Taneja #define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
133c76b78d8SArchit Taneja 
134c76b78d8SArchit Taneja /* NAND_ERASED_CW_DETECT_STATUS bits */
135c76b78d8SArchit Taneja #define	PAGE_ALL_ERASED			BIT(7)
136c76b78d8SArchit Taneja #define	CODEWORD_ALL_ERASED		BIT(6)
137c76b78d8SArchit Taneja #define	PAGE_ERASED			BIT(5)
138c76b78d8SArchit Taneja #define	CODEWORD_ERASED			BIT(4)
139c76b78d8SArchit Taneja #define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
140c76b78d8SArchit Taneja #define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
141c76b78d8SArchit Taneja 
14291af95c1SAbhishek Sahu /* NAND_READ_LOCATION_n bits */
14391af95c1SAbhishek Sahu #define READ_LOCATION_OFFSET		0
14491af95c1SAbhishek Sahu #define READ_LOCATION_SIZE		16
14591af95c1SAbhishek Sahu #define READ_LOCATION_LAST		31
14691af95c1SAbhishek Sahu 
147c76b78d8SArchit Taneja /* Version Mask */
148c76b78d8SArchit Taneja #define	NAND_VERSION_MAJOR_MASK		0xf0000000
149c76b78d8SArchit Taneja #define	NAND_VERSION_MAJOR_SHIFT	28
150c76b78d8SArchit Taneja #define	NAND_VERSION_MINOR_MASK		0x0fff0000
151c76b78d8SArchit Taneja #define	NAND_VERSION_MINOR_SHIFT	16
152c76b78d8SArchit Taneja 
153c76b78d8SArchit Taneja /* NAND OP_CMDs */
154c76b78d8SArchit Taneja #define	PAGE_READ			0x2
155c76b78d8SArchit Taneja #define	PAGE_READ_WITH_ECC		0x3
156c76b78d8SArchit Taneja #define	PAGE_READ_WITH_ECC_SPARE	0x4
157c76b78d8SArchit Taneja #define	PROGRAM_PAGE			0x6
158c76b78d8SArchit Taneja #define	PAGE_PROGRAM_WITH_ECC		0x7
159c76b78d8SArchit Taneja #define	PROGRAM_PAGE_SPARE		0x9
160c76b78d8SArchit Taneja #define	BLOCK_ERASE			0xa
161c76b78d8SArchit Taneja #define	FETCH_ID			0xb
162c76b78d8SArchit Taneja #define	RESET_DEVICE			0xd
163c76b78d8SArchit Taneja 
164d8a9b320SAbhishek Sahu /* Default Value for NAND_DEV_CMD_VLD */
165d8a9b320SAbhishek Sahu #define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
166d8a9b320SAbhishek Sahu 					 ERASE_START_VLD | SEQ_READ_START_VLD)
167d8a9b320SAbhishek Sahu 
1689d43f915SAbhishek Sahu /* NAND_CTRL bits */
1699d43f915SAbhishek Sahu #define	BAM_MODE_EN			BIT(0)
1709d43f915SAbhishek Sahu 
171c76b78d8SArchit Taneja /*
172c76b78d8SArchit Taneja  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
173c76b78d8SArchit Taneja  * the driver calls the chunks 'step' or 'codeword' interchangeably
174c76b78d8SArchit Taneja  */
175c76b78d8SArchit Taneja #define	NANDC_STEP_SIZE			512
176c76b78d8SArchit Taneja 
177c76b78d8SArchit Taneja /*
178c76b78d8SArchit Taneja  * the largest page size we support is 8K, this will have 16 steps/codewords
179c76b78d8SArchit Taneja  * of 512 bytes each
180c76b78d8SArchit Taneja  */
181c76b78d8SArchit Taneja #define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
182c76b78d8SArchit Taneja 
183c76b78d8SArchit Taneja /* we read at most 3 registers per codeword scan */
184c76b78d8SArchit Taneja #define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
185c76b78d8SArchit Taneja 
186c76b78d8SArchit Taneja /* ECC modes supported by the controller */
187c76b78d8SArchit Taneja #define	ECC_NONE	BIT(0)
188c76b78d8SArchit Taneja #define	ECC_RS_4BIT	BIT(1)
189c76b78d8SArchit Taneja #define	ECC_BCH_4BIT	BIT(2)
190c76b78d8SArchit Taneja #define	ECC_BCH_8BIT	BIT(3)
191c76b78d8SArchit Taneja 
/*
 * Program NAND_READ_LOCATION_<reg> (reg = 0..3) in the DMA register block:
 * packs the codeword byte offset, the transfer size and the "last location"
 * flag into a single register value via nandc_set_reg().
 */
#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
19791af95c1SAbhishek Sahu 
198cc409b9aSAbhishek Sahu /*
199cc409b9aSAbhishek Sahu  * Returns the actual register address for all NAND_DEV_ registers
200cc409b9aSAbhishek Sahu  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
201cc409b9aSAbhishek Sahu  */
202cc409b9aSAbhishek Sahu #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
203cc409b9aSAbhishek Sahu 
2048d6b6d7eSAbhishek Sahu /* Returns the NAND register physical address */
2058d6b6d7eSAbhishek Sahu #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
2068d6b6d7eSAbhishek Sahu 
2078d6b6d7eSAbhishek Sahu /* Returns the dma address for reg read buffer */
2088d6b6d7eSAbhishek Sahu #define reg_buf_dma_addr(chip, vaddr) \
2098d6b6d7eSAbhishek Sahu 	((chip)->reg_read_dma + \
2108d6b6d7eSAbhishek Sahu 	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
2118d6b6d7eSAbhishek Sahu 
2128c4cdce8SAbhishek Sahu #define QPIC_PER_CW_CMD_ELEMENTS	32
213cb80f114SAbhishek Sahu #define QPIC_PER_CW_CMD_SGL		32
214cb80f114SAbhishek Sahu #define QPIC_PER_CW_DATA_SGL		8
215cb80f114SAbhishek Sahu 
2166f20070dSAbhishek Sahu #define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
2176f20070dSAbhishek Sahu 
218cb80f114SAbhishek Sahu /*
21967e830aeSAbhishek Sahu  * Flags used in DMA descriptor preparation helper functions
22067e830aeSAbhishek Sahu  * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
22167e830aeSAbhishek Sahu  */
22267e830aeSAbhishek Sahu /* Don't set the EOT in current tx BAM sgl */
22367e830aeSAbhishek Sahu #define NAND_BAM_NO_EOT			BIT(0)
22467e830aeSAbhishek Sahu /* Set the NWD flag in current BAM sgl */
22567e830aeSAbhishek Sahu #define NAND_BAM_NWD			BIT(1)
22667e830aeSAbhishek Sahu /* Finish writing in the current BAM sgl and start writing in another BAM sgl */
22767e830aeSAbhishek Sahu #define NAND_BAM_NEXT_SGL		BIT(2)
228a86b9c4fSAbhishek Sahu /*
229a86b9c4fSAbhishek Sahu  * Erased codeword status is being used two times in single transfer so this
230a86b9c4fSAbhishek Sahu  * flag will determine the current value of erased codeword status register
231a86b9c4fSAbhishek Sahu  */
232a86b9c4fSAbhishek Sahu #define NAND_ERASED_CW_SET		BIT(4)
23367e830aeSAbhishek Sahu 
/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers. All of the pointer members (bam_ce, cmd_sgl, data_sgl) are
 * carved out of one contiguous devm allocation made by
 * alloc_bam_transaction(), so freeing the struct frees everything.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position ce
 *		   for current sgl. It will be used for size calculation
 *		   for current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *			     the NAND transfer completion.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};
273cb80f114SAbhishek Sahu 
/*
 * This data type corresponds to the nand dma descriptor
 * @list - list for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *	      ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 *
 * The anonymous union holds either the ADM representation (one embedded sgl)
 * or the BAM representation (pointer to an sgl array plus its count); which
 * arm is valid depends on the DMA engine the controller uses.
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
297c76b78d8SArchit Taneja 
/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 * Fields are __le32 because the buffer is handed to the DMA engine as-is.
 */
struct nandc_regs {
	__le32 cmd;		/* NAND_FLASH_CMD */
	__le32 addr0;		/* NAND_ADDR0 */
	__le32 addr1;		/* NAND_ADDR1 */
	__le32 chip_sel;	/* NAND_FLASH_CHIP_SELECT */
	__le32 exec;		/* NAND_EXEC_CMD */

	__le32 cfg0;		/* NAND_DEV0_CFG0 */
	__le32 cfg1;		/* NAND_DEV0_CFG1 */
	__le32 ecc_bch_cfg;	/* NAND_DEV0_ECC_CFG */

	/* values used to clear status registers after an operation */
	__le32 clrflashstatus;	/* written to NAND_FLASH_STATUS */
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	/* original CMD1/VLD values, restored via the *_RESTORE dummy offsets */
	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	/* NAND_ERASED_CW_DETECT_CFG values for clearing/arming detection */
	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
331c76b78d8SArchit Taneja 
/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 * @base:			MMIO base
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @chan:			dma channel
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 * @tx_chan/rx_chan/cmd_chan:	BAM DMA channels, used only by QPIC (shares a
 *				union with the EBI2/ADM fields above)
 * @desc_list:			DMA descriptor list (list of desc_infos)
 * @bam_txn:			BAM transaction state, allocated by
 *				alloc_bam_transaction(); only used when
 *				props->is_bam is true
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_dma:		contains dma address for register read buffer
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to controller
 * @cmd1/vld:			some fixed controller register values
 * @props:			properties of current NAND controller,
 *				initialized via DT match data
 * @max_cwperpage:		maximum QPIC codewords required. calculated
 *				from all connected NAND devices pagesize
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8		*data_buffer;
	int		buf_size;
	int		buf_count;
	int		buf_start;
	unsigned int	max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
414c76b78d8SArchit Taneja 
/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @spare_bytes:		number of spare bytes per codeword
 * @bbm_size:			number of bad-block-marker bytes per codeword
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 * @ecc_buf_cfg/ecc_bch_cfg:	precomputed ECC configuration register values
 * @clrflashstatus/clrreadstatus: values used to clear the status registers
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
464c76b78d8SArchit Taneja 
/*
 * This data type corresponds to the NAND controller properties which varies
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset; consumed by
 *			the dev_cmd_reg_addr() macro to compute the actual
 *			register addresses
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};
47758f1f22aSAbhishek Sahu 
478cb80f114SAbhishek Sahu /* Frees the BAM transaction memory */
479cb80f114SAbhishek Sahu static void free_bam_transaction(struct qcom_nand_controller *nandc)
480cb80f114SAbhishek Sahu {
481cb80f114SAbhishek Sahu 	struct bam_transaction *bam_txn = nandc->bam_txn;
482cb80f114SAbhishek Sahu 
483cb80f114SAbhishek Sahu 	devm_kfree(nandc->dev, bam_txn);
484cb80f114SAbhishek Sahu }
485cb80f114SAbhishek Sahu 
486cb80f114SAbhishek Sahu /* Allocates and Initializes the BAM transaction */
487cb80f114SAbhishek Sahu static struct bam_transaction *
488cb80f114SAbhishek Sahu alloc_bam_transaction(struct qcom_nand_controller *nandc)
489cb80f114SAbhishek Sahu {
490cb80f114SAbhishek Sahu 	struct bam_transaction *bam_txn;
491cb80f114SAbhishek Sahu 	size_t bam_txn_size;
492cb80f114SAbhishek Sahu 	unsigned int num_cw = nandc->max_cwperpage;
493cb80f114SAbhishek Sahu 	void *bam_txn_buf;
494cb80f114SAbhishek Sahu 
495cb80f114SAbhishek Sahu 	bam_txn_size =
496cb80f114SAbhishek Sahu 		sizeof(*bam_txn) + num_cw *
4978c4cdce8SAbhishek Sahu 		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
4988c4cdce8SAbhishek Sahu 		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
499cb80f114SAbhishek Sahu 		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
500cb80f114SAbhishek Sahu 
501cb80f114SAbhishek Sahu 	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
502cb80f114SAbhishek Sahu 	if (!bam_txn_buf)
503cb80f114SAbhishek Sahu 		return NULL;
504cb80f114SAbhishek Sahu 
505cb80f114SAbhishek Sahu 	bam_txn = bam_txn_buf;
506cb80f114SAbhishek Sahu 	bam_txn_buf += sizeof(*bam_txn);
507cb80f114SAbhishek Sahu 
5088c4cdce8SAbhishek Sahu 	bam_txn->bam_ce = bam_txn_buf;
5098c4cdce8SAbhishek Sahu 	bam_txn_buf +=
5108c4cdce8SAbhishek Sahu 		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
5118c4cdce8SAbhishek Sahu 
512cb80f114SAbhishek Sahu 	bam_txn->cmd_sgl = bam_txn_buf;
513cb80f114SAbhishek Sahu 	bam_txn_buf +=
514cb80f114SAbhishek Sahu 		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
515cb80f114SAbhishek Sahu 
516cb80f114SAbhishek Sahu 	bam_txn->data_sgl = bam_txn_buf;
517cb80f114SAbhishek Sahu 
5186f20070dSAbhishek Sahu 	init_completion(&bam_txn->txn_done);
5196f20070dSAbhishek Sahu 
520cb80f114SAbhishek Sahu 	return bam_txn;
521cb80f114SAbhishek Sahu }
522cb80f114SAbhishek Sahu 
5234e2f6c52SAbhishek Sahu /* Clears the BAM transaction indexes */
5244e2f6c52SAbhishek Sahu static void clear_bam_transaction(struct qcom_nand_controller *nandc)
5254e2f6c52SAbhishek Sahu {
5264e2f6c52SAbhishek Sahu 	struct bam_transaction *bam_txn = nandc->bam_txn;
5274e2f6c52SAbhishek Sahu 
5284e2f6c52SAbhishek Sahu 	if (!nandc->props->is_bam)
5294e2f6c52SAbhishek Sahu 		return;
5304e2f6c52SAbhishek Sahu 
5318c4cdce8SAbhishek Sahu 	bam_txn->bam_ce_pos = 0;
5328c4cdce8SAbhishek Sahu 	bam_txn->bam_ce_start = 0;
5334e2f6c52SAbhishek Sahu 	bam_txn->cmd_sgl_pos = 0;
5344e2f6c52SAbhishek Sahu 	bam_txn->cmd_sgl_start = 0;
5354e2f6c52SAbhishek Sahu 	bam_txn->tx_sgl_pos = 0;
5364e2f6c52SAbhishek Sahu 	bam_txn->tx_sgl_start = 0;
5374e2f6c52SAbhishek Sahu 	bam_txn->rx_sgl_pos = 0;
5384e2f6c52SAbhishek Sahu 	bam_txn->rx_sgl_start = 0;
5396f20070dSAbhishek Sahu 	bam_txn->last_data_desc = NULL;
5406f20070dSAbhishek Sahu 	bam_txn->wait_second_completion = false;
5414e2f6c52SAbhishek Sahu 
5424e2f6c52SAbhishek Sahu 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
5434e2f6c52SAbhishek Sahu 		      QPIC_PER_CW_CMD_SGL);
5444e2f6c52SAbhishek Sahu 	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
5454e2f6c52SAbhishek Sahu 		      QPIC_PER_CW_DATA_SGL);
5466f20070dSAbhishek Sahu 
5476f20070dSAbhishek Sahu 	reinit_completion(&bam_txn->txn_done);
5486f20070dSAbhishek Sahu }
5496f20070dSAbhishek Sahu 
5506f20070dSAbhishek Sahu /* Callback for DMA descriptor completion */
5516f20070dSAbhishek Sahu static void qpic_bam_dma_done(void *data)
5526f20070dSAbhishek Sahu {
5536f20070dSAbhishek Sahu 	struct bam_transaction *bam_txn = data;
5546f20070dSAbhishek Sahu 
5556f20070dSAbhishek Sahu 	/*
5566f20070dSAbhishek Sahu 	 * In case of data transfer with NAND, 2 callbacks will be generated.
5576f20070dSAbhishek Sahu 	 * One for command channel and another one for data channel.
5586f20070dSAbhishek Sahu 	 * If current transaction has data descriptors
5596f20070dSAbhishek Sahu 	 * (i.e. wait_second_completion is true), then set this to false
5606f20070dSAbhishek Sahu 	 * and wait for second DMA descriptor completion.
5616f20070dSAbhishek Sahu 	 */
5626f20070dSAbhishek Sahu 	if (bam_txn->wait_second_completion)
5636f20070dSAbhishek Sahu 		bam_txn->wait_second_completion = false;
5646f20070dSAbhishek Sahu 	else
5656f20070dSAbhishek Sahu 		complete(&bam_txn->txn_done);
5664e2f6c52SAbhishek Sahu }
5674e2f6c52SAbhishek Sahu 
568c76b78d8SArchit Taneja static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
569c76b78d8SArchit Taneja {
570c76b78d8SArchit Taneja 	return container_of(chip, struct qcom_nand_host, chip);
571c76b78d8SArchit Taneja }
572c76b78d8SArchit Taneja 
573c76b78d8SArchit Taneja static inline struct qcom_nand_controller *
574c76b78d8SArchit Taneja get_qcom_nand_controller(struct nand_chip *chip)
575c76b78d8SArchit Taneja {
576c76b78d8SArchit Taneja 	return container_of(chip->controller, struct qcom_nand_controller,
577c76b78d8SArchit Taneja 			    controller);
578c76b78d8SArchit Taneja }
579c76b78d8SArchit Taneja 
580c76b78d8SArchit Taneja static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
581c76b78d8SArchit Taneja {
582c76b78d8SArchit Taneja 	return ioread32(nandc->base + offset);
583c76b78d8SArchit Taneja }
584c76b78d8SArchit Taneja 
585c76b78d8SArchit Taneja static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
586c76b78d8SArchit Taneja 			       u32 val)
587c76b78d8SArchit Taneja {
588c76b78d8SArchit Taneja 	iowrite32(val, nandc->base + offset);
589c76b78d8SArchit Taneja }
590c76b78d8SArchit Taneja 
5916192ff7aSAbhishek Sahu static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
5926192ff7aSAbhishek Sahu 					  bool is_cpu)
5936192ff7aSAbhishek Sahu {
5946192ff7aSAbhishek Sahu 	if (!nandc->props->is_bam)
5956192ff7aSAbhishek Sahu 		return;
5966192ff7aSAbhishek Sahu 
5976192ff7aSAbhishek Sahu 	if (is_cpu)
5986192ff7aSAbhishek Sahu 		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
5996192ff7aSAbhishek Sahu 					MAX_REG_RD *
6006192ff7aSAbhishek Sahu 					sizeof(*nandc->reg_read_buf),
6016192ff7aSAbhishek Sahu 					DMA_FROM_DEVICE);
6026192ff7aSAbhishek Sahu 	else
6036192ff7aSAbhishek Sahu 		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
6046192ff7aSAbhishek Sahu 					   MAX_REG_RD *
6056192ff7aSAbhishek Sahu 					   sizeof(*nandc->reg_read_buf),
6066192ff7aSAbhishek Sahu 					   DMA_FROM_DEVICE);
6076192ff7aSAbhishek Sahu }
6086192ff7aSAbhishek Sahu 
609c76b78d8SArchit Taneja static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
610c76b78d8SArchit Taneja {
611c76b78d8SArchit Taneja 	switch (offset) {
612c76b78d8SArchit Taneja 	case NAND_FLASH_CMD:
613c76b78d8SArchit Taneja 		return &regs->cmd;
614c76b78d8SArchit Taneja 	case NAND_ADDR0:
615c76b78d8SArchit Taneja 		return &regs->addr0;
616c76b78d8SArchit Taneja 	case NAND_ADDR1:
617c76b78d8SArchit Taneja 		return &regs->addr1;
618c76b78d8SArchit Taneja 	case NAND_FLASH_CHIP_SELECT:
619c76b78d8SArchit Taneja 		return &regs->chip_sel;
620c76b78d8SArchit Taneja 	case NAND_EXEC_CMD:
621c76b78d8SArchit Taneja 		return &regs->exec;
622c76b78d8SArchit Taneja 	case NAND_FLASH_STATUS:
623c76b78d8SArchit Taneja 		return &regs->clrflashstatus;
624c76b78d8SArchit Taneja 	case NAND_DEV0_CFG0:
625c76b78d8SArchit Taneja 		return &regs->cfg0;
626c76b78d8SArchit Taneja 	case NAND_DEV0_CFG1:
627c76b78d8SArchit Taneja 		return &regs->cfg1;
628c76b78d8SArchit Taneja 	case NAND_DEV0_ECC_CFG:
629c76b78d8SArchit Taneja 		return &regs->ecc_bch_cfg;
630c76b78d8SArchit Taneja 	case NAND_READ_STATUS:
631c76b78d8SArchit Taneja 		return &regs->clrreadstatus;
632c76b78d8SArchit Taneja 	case NAND_DEV_CMD1:
633c76b78d8SArchit Taneja 		return &regs->cmd1;
634c76b78d8SArchit Taneja 	case NAND_DEV_CMD1_RESTORE:
635c76b78d8SArchit Taneja 		return &regs->orig_cmd1;
636c76b78d8SArchit Taneja 	case NAND_DEV_CMD_VLD:
637c76b78d8SArchit Taneja 		return &regs->vld;
638c76b78d8SArchit Taneja 	case NAND_DEV_CMD_VLD_RESTORE:
639c76b78d8SArchit Taneja 		return &regs->orig_vld;
640c76b78d8SArchit Taneja 	case NAND_EBI2_ECC_BUF_CFG:
641c76b78d8SArchit Taneja 		return &regs->ecc_buf_cfg;
64291af95c1SAbhishek Sahu 	case NAND_READ_LOCATION_0:
64391af95c1SAbhishek Sahu 		return &regs->read_location0;
64491af95c1SAbhishek Sahu 	case NAND_READ_LOCATION_1:
64591af95c1SAbhishek Sahu 		return &regs->read_location1;
64691af95c1SAbhishek Sahu 	case NAND_READ_LOCATION_2:
64791af95c1SAbhishek Sahu 		return &regs->read_location2;
64891af95c1SAbhishek Sahu 	case NAND_READ_LOCATION_3:
64991af95c1SAbhishek Sahu 		return &regs->read_location3;
650c76b78d8SArchit Taneja 	default:
651c76b78d8SArchit Taneja 		return NULL;
652c76b78d8SArchit Taneja 	}
653c76b78d8SArchit Taneja }
654c76b78d8SArchit Taneja 
655c76b78d8SArchit Taneja static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
656c76b78d8SArchit Taneja 			  u32 val)
657c76b78d8SArchit Taneja {
658c76b78d8SArchit Taneja 	struct nandc_regs *regs = nandc->regs;
659c76b78d8SArchit Taneja 	__le32 *reg;
660c76b78d8SArchit Taneja 
661c76b78d8SArchit Taneja 	reg = offset_to_nandc_reg(regs, offset);
662c76b78d8SArchit Taneja 
663c76b78d8SArchit Taneja 	if (reg)
664c76b78d8SArchit Taneja 		*reg = cpu_to_le32(val);
665c76b78d8SArchit Taneja }
666c76b78d8SArchit Taneja 
667c76b78d8SArchit Taneja /* helper to configure address register values */
668c76b78d8SArchit Taneja static void set_address(struct qcom_nand_host *host, u16 column, int page)
669c76b78d8SArchit Taneja {
670c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
671c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
672c76b78d8SArchit Taneja 
673c76b78d8SArchit Taneja 	if (chip->options & NAND_BUSWIDTH_16)
674c76b78d8SArchit Taneja 		column >>= 1;
675c76b78d8SArchit Taneja 
676c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
677c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
678c76b78d8SArchit Taneja }
679c76b78d8SArchit Taneja 
680c76b78d8SArchit Taneja /*
681c76b78d8SArchit Taneja  * update_rw_regs:	set up read/write register values, these will be
682c76b78d8SArchit Taneja  *			written to the NAND controller registers via DMA
683c76b78d8SArchit Taneja  *
684c76b78d8SArchit Taneja  * @num_cw:		number of steps for the read/write operation
685c76b78d8SArchit Taneja  * @read:		read or write operation
686c76b78d8SArchit Taneja  */
687c76b78d8SArchit Taneja static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
688c76b78d8SArchit Taneja {
689c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
690c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
691c76b78d8SArchit Taneja 	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
692c76b78d8SArchit Taneja 
693c76b78d8SArchit Taneja 	if (read) {
694c76b78d8SArchit Taneja 		if (host->use_ecc)
695c76b78d8SArchit Taneja 			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
696c76b78d8SArchit Taneja 		else
697c76b78d8SArchit Taneja 			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
698c76b78d8SArchit Taneja 	} else {
699c76b78d8SArchit Taneja 			cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
700c76b78d8SArchit Taneja 	}
701c76b78d8SArchit Taneja 
702c76b78d8SArchit Taneja 	if (host->use_ecc) {
703c76b78d8SArchit Taneja 		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
704c76b78d8SArchit Taneja 				(num_cw - 1) << CW_PER_PAGE;
705c76b78d8SArchit Taneja 
706c76b78d8SArchit Taneja 		cfg1 = host->cfg1;
707c76b78d8SArchit Taneja 		ecc_bch_cfg = host->ecc_bch_cfg;
708c76b78d8SArchit Taneja 	} else {
709c76b78d8SArchit Taneja 		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
710c76b78d8SArchit Taneja 				(num_cw - 1) << CW_PER_PAGE;
711c76b78d8SArchit Taneja 
712c76b78d8SArchit Taneja 		cfg1 = host->cfg1_raw;
713c76b78d8SArchit Taneja 		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
714c76b78d8SArchit Taneja 	}
715c76b78d8SArchit Taneja 
716c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
717c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
718c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
719c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
720c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
721c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
722c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
723c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
72491af95c1SAbhishek Sahu 
72591af95c1SAbhishek Sahu 	if (read)
72691af95c1SAbhishek Sahu 		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
72791af95c1SAbhishek Sahu 				   host->cw_data : host->cw_size, 1);
728c76b78d8SArchit Taneja }
729c76b78d8SArchit Taneja 
730381dd245SAbhishek Sahu /*
731381dd245SAbhishek Sahu  * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
732381dd245SAbhishek Sahu  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
733381dd245SAbhishek Sahu  * which will be submitted to DMA engine.
734381dd245SAbhishek Sahu  */
735381dd245SAbhishek Sahu static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
736381dd245SAbhishek Sahu 				  struct dma_chan *chan,
737381dd245SAbhishek Sahu 				  unsigned long flags)
738381dd245SAbhishek Sahu {
739381dd245SAbhishek Sahu 	struct desc_info *desc;
740381dd245SAbhishek Sahu 	struct scatterlist *sgl;
741381dd245SAbhishek Sahu 	unsigned int sgl_cnt;
742381dd245SAbhishek Sahu 	int ret;
743381dd245SAbhishek Sahu 	struct bam_transaction *bam_txn = nandc->bam_txn;
744381dd245SAbhishek Sahu 	enum dma_transfer_direction dir_eng;
745381dd245SAbhishek Sahu 	struct dma_async_tx_descriptor *dma_desc;
746381dd245SAbhishek Sahu 
747381dd245SAbhishek Sahu 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
748381dd245SAbhishek Sahu 	if (!desc)
749381dd245SAbhishek Sahu 		return -ENOMEM;
750381dd245SAbhishek Sahu 
751381dd245SAbhishek Sahu 	if (chan == nandc->cmd_chan) {
752381dd245SAbhishek Sahu 		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
753381dd245SAbhishek Sahu 		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
754381dd245SAbhishek Sahu 		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
755381dd245SAbhishek Sahu 		dir_eng = DMA_MEM_TO_DEV;
756381dd245SAbhishek Sahu 		desc->dir = DMA_TO_DEVICE;
757381dd245SAbhishek Sahu 	} else if (chan == nandc->tx_chan) {
758381dd245SAbhishek Sahu 		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
759381dd245SAbhishek Sahu 		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
760381dd245SAbhishek Sahu 		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
761381dd245SAbhishek Sahu 		dir_eng = DMA_MEM_TO_DEV;
762381dd245SAbhishek Sahu 		desc->dir = DMA_TO_DEVICE;
763381dd245SAbhishek Sahu 	} else {
764381dd245SAbhishek Sahu 		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
765381dd245SAbhishek Sahu 		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
766381dd245SAbhishek Sahu 		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
767381dd245SAbhishek Sahu 		dir_eng = DMA_DEV_TO_MEM;
768381dd245SAbhishek Sahu 		desc->dir = DMA_FROM_DEVICE;
769381dd245SAbhishek Sahu 	}
770381dd245SAbhishek Sahu 
771381dd245SAbhishek Sahu 	sg_mark_end(sgl + sgl_cnt - 1);
772381dd245SAbhishek Sahu 	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
773381dd245SAbhishek Sahu 	if (ret == 0) {
774381dd245SAbhishek Sahu 		dev_err(nandc->dev, "failure in mapping desc\n");
775381dd245SAbhishek Sahu 		kfree(desc);
776381dd245SAbhishek Sahu 		return -ENOMEM;
777381dd245SAbhishek Sahu 	}
778381dd245SAbhishek Sahu 
779381dd245SAbhishek Sahu 	desc->sgl_cnt = sgl_cnt;
780381dd245SAbhishek Sahu 	desc->bam_sgl = sgl;
781381dd245SAbhishek Sahu 
782381dd245SAbhishek Sahu 	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
783381dd245SAbhishek Sahu 					   flags);
784381dd245SAbhishek Sahu 
785381dd245SAbhishek Sahu 	if (!dma_desc) {
786381dd245SAbhishek Sahu 		dev_err(nandc->dev, "failure in prep desc\n");
787381dd245SAbhishek Sahu 		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
788381dd245SAbhishek Sahu 		kfree(desc);
789381dd245SAbhishek Sahu 		return -EINVAL;
790381dd245SAbhishek Sahu 	}
791381dd245SAbhishek Sahu 
792381dd245SAbhishek Sahu 	desc->dma_desc = dma_desc;
793381dd245SAbhishek Sahu 
7946f20070dSAbhishek Sahu 	/* update last data/command descriptor */
7956f20070dSAbhishek Sahu 	if (chan == nandc->cmd_chan)
7966f20070dSAbhishek Sahu 		bam_txn->last_cmd_desc = dma_desc;
7976f20070dSAbhishek Sahu 	else
7986f20070dSAbhishek Sahu 		bam_txn->last_data_desc = dma_desc;
7996f20070dSAbhishek Sahu 
800381dd245SAbhishek Sahu 	list_add_tail(&desc->node, &nandc->desc_list);
801381dd245SAbhishek Sahu 
802381dd245SAbhishek Sahu 	return 0;
803381dd245SAbhishek Sahu }
804381dd245SAbhishek Sahu 
8054e2f6c52SAbhishek Sahu /*
8068d6b6d7eSAbhishek Sahu  * Prepares the command descriptor for BAM DMA which will be used for NAND
8078d6b6d7eSAbhishek Sahu  * register reads and writes. The command descriptor requires the command
8088d6b6d7eSAbhishek Sahu  * to be formed in command element type so this function uses the command
8098d6b6d7eSAbhishek Sahu  * element from bam transaction ce array and fills the same with required
8108d6b6d7eSAbhishek Sahu  * data. A single SGL can contain multiple command elements so
8118d6b6d7eSAbhishek Sahu  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
8128d6b6d7eSAbhishek Sahu  * after the current command element.
8138d6b6d7eSAbhishek Sahu  */
8148d6b6d7eSAbhishek Sahu static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
8158d6b6d7eSAbhishek Sahu 				 int reg_off, const void *vaddr,
8168d6b6d7eSAbhishek Sahu 				 int size, unsigned int flags)
8178d6b6d7eSAbhishek Sahu {
8188d6b6d7eSAbhishek Sahu 	int bam_ce_size;
8198d6b6d7eSAbhishek Sahu 	int i, ret;
8208d6b6d7eSAbhishek Sahu 	struct bam_cmd_element *bam_ce_buffer;
8218d6b6d7eSAbhishek Sahu 	struct bam_transaction *bam_txn = nandc->bam_txn;
8228d6b6d7eSAbhishek Sahu 
8238d6b6d7eSAbhishek Sahu 	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
8248d6b6d7eSAbhishek Sahu 
8258d6b6d7eSAbhishek Sahu 	/* fill the command desc */
8268d6b6d7eSAbhishek Sahu 	for (i = 0; i < size; i++) {
8278d6b6d7eSAbhishek Sahu 		if (read)
8288d6b6d7eSAbhishek Sahu 			bam_prep_ce(&bam_ce_buffer[i],
8298d6b6d7eSAbhishek Sahu 				    nandc_reg_phys(nandc, reg_off + 4 * i),
8308d6b6d7eSAbhishek Sahu 				    BAM_READ_COMMAND,
8318d6b6d7eSAbhishek Sahu 				    reg_buf_dma_addr(nandc,
8328d6b6d7eSAbhishek Sahu 						     (__le32 *)vaddr + i));
8338d6b6d7eSAbhishek Sahu 		else
8348d6b6d7eSAbhishek Sahu 			bam_prep_ce_le32(&bam_ce_buffer[i],
8358d6b6d7eSAbhishek Sahu 					 nandc_reg_phys(nandc, reg_off + 4 * i),
8368d6b6d7eSAbhishek Sahu 					 BAM_WRITE_COMMAND,
8378d6b6d7eSAbhishek Sahu 					 *((__le32 *)vaddr + i));
8388d6b6d7eSAbhishek Sahu 	}
8398d6b6d7eSAbhishek Sahu 
8408d6b6d7eSAbhishek Sahu 	bam_txn->bam_ce_pos += size;
8418d6b6d7eSAbhishek Sahu 
8428d6b6d7eSAbhishek Sahu 	/* use the separate sgl after this command */
8438d6b6d7eSAbhishek Sahu 	if (flags & NAND_BAM_NEXT_SGL) {
8448d6b6d7eSAbhishek Sahu 		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
8458d6b6d7eSAbhishek Sahu 		bam_ce_size = (bam_txn->bam_ce_pos -
8468d6b6d7eSAbhishek Sahu 				bam_txn->bam_ce_start) *
8478d6b6d7eSAbhishek Sahu 				sizeof(struct bam_cmd_element);
8488d6b6d7eSAbhishek Sahu 		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
8498d6b6d7eSAbhishek Sahu 			   bam_ce_buffer, bam_ce_size);
8508d6b6d7eSAbhishek Sahu 		bam_txn->cmd_sgl_pos++;
8518d6b6d7eSAbhishek Sahu 		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
8528d6b6d7eSAbhishek Sahu 
8538d6b6d7eSAbhishek Sahu 		if (flags & NAND_BAM_NWD) {
8548d6b6d7eSAbhishek Sahu 			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
8558d6b6d7eSAbhishek Sahu 						     DMA_PREP_FENCE |
8568d6b6d7eSAbhishek Sahu 						     DMA_PREP_CMD);
8578d6b6d7eSAbhishek Sahu 			if (ret)
8588d6b6d7eSAbhishek Sahu 				return ret;
8598d6b6d7eSAbhishek Sahu 		}
8608d6b6d7eSAbhishek Sahu 	}
8618d6b6d7eSAbhishek Sahu 
8628d6b6d7eSAbhishek Sahu 	return 0;
8638d6b6d7eSAbhishek Sahu }
8648d6b6d7eSAbhishek Sahu 
8658d6b6d7eSAbhishek Sahu /*
8664e2f6c52SAbhishek Sahu  * Prepares the data descriptor for BAM DMA which will be used for NAND
8674e2f6c52SAbhishek Sahu  * data reads and writes.
8684e2f6c52SAbhishek Sahu  */
8694e2f6c52SAbhishek Sahu static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
8704e2f6c52SAbhishek Sahu 				  const void *vaddr,
8714e2f6c52SAbhishek Sahu 				  int size, unsigned int flags)
8724e2f6c52SAbhishek Sahu {
8734e2f6c52SAbhishek Sahu 	int ret;
8744e2f6c52SAbhishek Sahu 	struct bam_transaction *bam_txn = nandc->bam_txn;
8754e2f6c52SAbhishek Sahu 
8764e2f6c52SAbhishek Sahu 	if (read) {
8774e2f6c52SAbhishek Sahu 		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
8784e2f6c52SAbhishek Sahu 			   vaddr, size);
8794e2f6c52SAbhishek Sahu 		bam_txn->rx_sgl_pos++;
8804e2f6c52SAbhishek Sahu 	} else {
8814e2f6c52SAbhishek Sahu 		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
8824e2f6c52SAbhishek Sahu 			   vaddr, size);
8834e2f6c52SAbhishek Sahu 		bam_txn->tx_sgl_pos++;
8844e2f6c52SAbhishek Sahu 
8854e2f6c52SAbhishek Sahu 		/*
8864e2f6c52SAbhishek Sahu 		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
8874e2f6c52SAbhishek Sahu 		 * is not set, form the DMA descriptor
8884e2f6c52SAbhishek Sahu 		 */
8894e2f6c52SAbhishek Sahu 		if (!(flags & NAND_BAM_NO_EOT)) {
8904e2f6c52SAbhishek Sahu 			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
8914e2f6c52SAbhishek Sahu 						     DMA_PREP_INTERRUPT);
8924e2f6c52SAbhishek Sahu 			if (ret)
8934e2f6c52SAbhishek Sahu 				return ret;
8944e2f6c52SAbhishek Sahu 		}
8954e2f6c52SAbhishek Sahu 	}
8964e2f6c52SAbhishek Sahu 
8974e2f6c52SAbhishek Sahu 	return 0;
8984e2f6c52SAbhishek Sahu }
8994e2f6c52SAbhishek Sahu 
900381dd245SAbhishek Sahu static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
901c76b78d8SArchit Taneja 			     int reg_off, const void *vaddr, int size,
902c76b78d8SArchit Taneja 			     bool flow_control)
903c76b78d8SArchit Taneja {
904c76b78d8SArchit Taneja 	struct desc_info *desc;
905c76b78d8SArchit Taneja 	struct dma_async_tx_descriptor *dma_desc;
906c76b78d8SArchit Taneja 	struct scatterlist *sgl;
907c76b78d8SArchit Taneja 	struct dma_slave_config slave_conf;
908c76b78d8SArchit Taneja 	enum dma_transfer_direction dir_eng;
909c76b78d8SArchit Taneja 	int ret;
910c76b78d8SArchit Taneja 
911c76b78d8SArchit Taneja 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
912c76b78d8SArchit Taneja 	if (!desc)
913c76b78d8SArchit Taneja 		return -ENOMEM;
914c76b78d8SArchit Taneja 
915381dd245SAbhishek Sahu 	sgl = &desc->adm_sgl;
916c76b78d8SArchit Taneja 
917c76b78d8SArchit Taneja 	sg_init_one(sgl, vaddr, size);
918c76b78d8SArchit Taneja 
919c76b78d8SArchit Taneja 	if (read) {
920c76b78d8SArchit Taneja 		dir_eng = DMA_DEV_TO_MEM;
921c76b78d8SArchit Taneja 		desc->dir = DMA_FROM_DEVICE;
922c76b78d8SArchit Taneja 	} else {
923c76b78d8SArchit Taneja 		dir_eng = DMA_MEM_TO_DEV;
924c76b78d8SArchit Taneja 		desc->dir = DMA_TO_DEVICE;
925c76b78d8SArchit Taneja 	}
926c76b78d8SArchit Taneja 
927c76b78d8SArchit Taneja 	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
928c76b78d8SArchit Taneja 	if (ret == 0) {
929c76b78d8SArchit Taneja 		ret = -ENOMEM;
930c76b78d8SArchit Taneja 		goto err;
931c76b78d8SArchit Taneja 	}
932c76b78d8SArchit Taneja 
933c76b78d8SArchit Taneja 	memset(&slave_conf, 0x00, sizeof(slave_conf));
934c76b78d8SArchit Taneja 
935c76b78d8SArchit Taneja 	slave_conf.device_fc = flow_control;
936c76b78d8SArchit Taneja 	if (read) {
937c76b78d8SArchit Taneja 		slave_conf.src_maxburst = 16;
938c76b78d8SArchit Taneja 		slave_conf.src_addr = nandc->base_dma + reg_off;
939c76b78d8SArchit Taneja 		slave_conf.slave_id = nandc->data_crci;
940c76b78d8SArchit Taneja 	} else {
941c76b78d8SArchit Taneja 		slave_conf.dst_maxburst = 16;
942c76b78d8SArchit Taneja 		slave_conf.dst_addr = nandc->base_dma + reg_off;
943c76b78d8SArchit Taneja 		slave_conf.slave_id = nandc->cmd_crci;
944c76b78d8SArchit Taneja 	}
945c76b78d8SArchit Taneja 
946c76b78d8SArchit Taneja 	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
947c76b78d8SArchit Taneja 	if (ret) {
948c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failed to configure dma channel\n");
949c76b78d8SArchit Taneja 		goto err;
950c76b78d8SArchit Taneja 	}
951c76b78d8SArchit Taneja 
952c76b78d8SArchit Taneja 	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
953c76b78d8SArchit Taneja 	if (!dma_desc) {
954c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failed to prepare desc\n");
955c76b78d8SArchit Taneja 		ret = -EINVAL;
956c76b78d8SArchit Taneja 		goto err;
957c76b78d8SArchit Taneja 	}
958c76b78d8SArchit Taneja 
959c76b78d8SArchit Taneja 	desc->dma_desc = dma_desc;
960c76b78d8SArchit Taneja 
961c76b78d8SArchit Taneja 	list_add_tail(&desc->node, &nandc->desc_list);
962c76b78d8SArchit Taneja 
963c76b78d8SArchit Taneja 	return 0;
964c76b78d8SArchit Taneja err:
965c76b78d8SArchit Taneja 	kfree(desc);
966c76b78d8SArchit Taneja 
967c76b78d8SArchit Taneja 	return ret;
968c76b78d8SArchit Taneja }
969c76b78d8SArchit Taneja 
970c76b78d8SArchit Taneja /*
971c76b78d8SArchit Taneja  * read_reg_dma:	prepares a descriptor to read a given number of
972c76b78d8SArchit Taneja  *			contiguous registers to the reg_read_buf pointer
973c76b78d8SArchit Taneja  *
974c76b78d8SArchit Taneja  * @first:		offset of the first register in the contiguous block
975c76b78d8SArchit Taneja  * @num_regs:		number of registers to read
97667e830aeSAbhishek Sahu  * @flags:		flags to control DMA descriptor preparation
977c76b78d8SArchit Taneja  */
978c76b78d8SArchit Taneja static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
97967e830aeSAbhishek Sahu 			int num_regs, unsigned int flags)
980c76b78d8SArchit Taneja {
981c76b78d8SArchit Taneja 	bool flow_control = false;
982c76b78d8SArchit Taneja 	void *vaddr;
983c76b78d8SArchit Taneja 
9848d6b6d7eSAbhishek Sahu 	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
9858d6b6d7eSAbhishek Sahu 	nandc->reg_read_pos += num_regs;
986c76b78d8SArchit Taneja 
987cc409b9aSAbhishek Sahu 	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
988cc409b9aSAbhishek Sahu 		first = dev_cmd_reg_addr(nandc, first);
989cc409b9aSAbhishek Sahu 
9908d6b6d7eSAbhishek Sahu 	if (nandc->props->is_bam)
9918d6b6d7eSAbhishek Sahu 		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
9928d6b6d7eSAbhishek Sahu 					     num_regs, flags);
993c76b78d8SArchit Taneja 
9948d6b6d7eSAbhishek Sahu 	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
9958d6b6d7eSAbhishek Sahu 		flow_control = true;
9968d6b6d7eSAbhishek Sahu 
9978d6b6d7eSAbhishek Sahu 	return prep_adm_dma_desc(nandc, true, first, vaddr,
9988d6b6d7eSAbhishek Sahu 				 num_regs * sizeof(u32), flow_control);
999c76b78d8SArchit Taneja }
1000c76b78d8SArchit Taneja 
1001c76b78d8SArchit Taneja /*
1002c76b78d8SArchit Taneja  * write_reg_dma:	prepares a descriptor to write a given number of
1003c76b78d8SArchit Taneja  *			contiguous registers
1004c76b78d8SArchit Taneja  *
1005c76b78d8SArchit Taneja  * @first:		offset of the first register in the contiguous block
1006c76b78d8SArchit Taneja  * @num_regs:		number of registers to write
100767e830aeSAbhishek Sahu  * @flags:		flags to control DMA descriptor preparation
1008c76b78d8SArchit Taneja  */
1009c76b78d8SArchit Taneja static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
101067e830aeSAbhishek Sahu 			 int num_regs, unsigned int flags)
1011c76b78d8SArchit Taneja {
1012c76b78d8SArchit Taneja 	bool flow_control = false;
1013c76b78d8SArchit Taneja 	struct nandc_regs *regs = nandc->regs;
1014c76b78d8SArchit Taneja 	void *vaddr;
1015c76b78d8SArchit Taneja 
1016c76b78d8SArchit Taneja 	vaddr = offset_to_nandc_reg(regs, first);
1017c76b78d8SArchit Taneja 
1018a86b9c4fSAbhishek Sahu 	if (first == NAND_ERASED_CW_DETECT_CFG) {
1019a86b9c4fSAbhishek Sahu 		if (flags & NAND_ERASED_CW_SET)
1020a86b9c4fSAbhishek Sahu 			vaddr = &regs->erased_cw_detect_cfg_set;
1021a86b9c4fSAbhishek Sahu 		else
1022a86b9c4fSAbhishek Sahu 			vaddr = &regs->erased_cw_detect_cfg_clr;
1023a86b9c4fSAbhishek Sahu 	}
1024a86b9c4fSAbhishek Sahu 
102567e830aeSAbhishek Sahu 	if (first == NAND_EXEC_CMD)
102667e830aeSAbhishek Sahu 		flags |= NAND_BAM_NWD;
102767e830aeSAbhishek Sahu 
1028cc409b9aSAbhishek Sahu 	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
1029cc409b9aSAbhishek Sahu 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
1030c76b78d8SArchit Taneja 
1031cc409b9aSAbhishek Sahu 	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
1032cc409b9aSAbhishek Sahu 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
1033c76b78d8SArchit Taneja 
10348d6b6d7eSAbhishek Sahu 	if (nandc->props->is_bam)
10358d6b6d7eSAbhishek Sahu 		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
10368d6b6d7eSAbhishek Sahu 					     num_regs, flags);
1037c76b78d8SArchit Taneja 
10388d6b6d7eSAbhishek Sahu 	if (first == NAND_FLASH_CMD)
10398d6b6d7eSAbhishek Sahu 		flow_control = true;
10408d6b6d7eSAbhishek Sahu 
10418d6b6d7eSAbhishek Sahu 	return prep_adm_dma_desc(nandc, false, first, vaddr,
10428d6b6d7eSAbhishek Sahu 				 num_regs * sizeof(u32), flow_control);
1043c76b78d8SArchit Taneja }
1044c76b78d8SArchit Taneja 
1045c76b78d8SArchit Taneja /*
1046c76b78d8SArchit Taneja  * read_data_dma:	prepares a DMA descriptor to transfer data from the
1047c76b78d8SArchit Taneja  *			controller's internal buffer to the buffer 'vaddr'
1048c76b78d8SArchit Taneja  *
1049c76b78d8SArchit Taneja  * @reg_off:		offset within the controller's data buffer
1050c76b78d8SArchit Taneja  * @vaddr:		virtual address of the buffer we want to write to
1051c76b78d8SArchit Taneja  * @size:		DMA transaction size in bytes
105267e830aeSAbhishek Sahu  * @flags:		flags to control DMA descriptor preparation
1053c76b78d8SArchit Taneja  */
1054c76b78d8SArchit Taneja static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
105567e830aeSAbhishek Sahu 			 const u8 *vaddr, int size, unsigned int flags)
1056c76b78d8SArchit Taneja {
10574e2f6c52SAbhishek Sahu 	if (nandc->props->is_bam)
10584e2f6c52SAbhishek Sahu 		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
10594e2f6c52SAbhishek Sahu 
1060381dd245SAbhishek Sahu 	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
1061c76b78d8SArchit Taneja }
1062c76b78d8SArchit Taneja 
1063c76b78d8SArchit Taneja /*
1064c76b78d8SArchit Taneja  * write_data_dma:	prepares a DMA descriptor to transfer data from
1065c76b78d8SArchit Taneja  *			'vaddr' to the controller's internal buffer
1066c76b78d8SArchit Taneja  *
1067c76b78d8SArchit Taneja  * @reg_off:		offset within the controller's data buffer
1068c76b78d8SArchit Taneja  * @vaddr:		virtual address of the buffer we want to read from
1069c76b78d8SArchit Taneja  * @size:		DMA transaction size in bytes
107067e830aeSAbhishek Sahu  * @flags:		flags to control DMA descriptor preparation
1071c76b78d8SArchit Taneja  */
1072c76b78d8SArchit Taneja static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
107367e830aeSAbhishek Sahu 			  const u8 *vaddr, int size, unsigned int flags)
1074c76b78d8SArchit Taneja {
10754e2f6c52SAbhishek Sahu 	if (nandc->props->is_bam)
10764e2f6c52SAbhishek Sahu 		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
10774e2f6c52SAbhishek Sahu 
1078381dd245SAbhishek Sahu 	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
1079c76b78d8SArchit Taneja }
1080c76b78d8SArchit Taneja 
1081c76b78d8SArchit Taneja /*
1082bde4330aSAbhishek Sahu  * Helper to prepare DMA descriptors for configuring registers
1083bde4330aSAbhishek Sahu  * before reading a NAND page.
1084c76b78d8SArchit Taneja  */
1085bde4330aSAbhishek Sahu static void config_nand_page_read(struct qcom_nand_controller *nandc)
1086c76b78d8SArchit Taneja {
108767e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
108867e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
108967e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
1090a86b9c4fSAbhishek Sahu 	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
1091a86b9c4fSAbhishek Sahu 	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
1092a86b9c4fSAbhishek Sahu 		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
1093bde4330aSAbhishek Sahu }
1094c76b78d8SArchit Taneja 
1095bde4330aSAbhishek Sahu /*
1096bde4330aSAbhishek Sahu  * Helper to prepare DMA descriptors for configuring registers
1097bde4330aSAbhishek Sahu  * before reading each codeword in NAND page.
1098bde4330aSAbhishek Sahu  */
1099bde4330aSAbhishek Sahu static void config_nand_cw_read(struct qcom_nand_controller *nandc)
1100bde4330aSAbhishek Sahu {
110191af95c1SAbhishek Sahu 	if (nandc->props->is_bam)
110291af95c1SAbhishek Sahu 		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
110391af95c1SAbhishek Sahu 			      NAND_BAM_NEXT_SGL);
110491af95c1SAbhishek Sahu 
110567e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
110667e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1107c76b78d8SArchit Taneja 
110867e830aeSAbhishek Sahu 	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
110967e830aeSAbhishek Sahu 	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
111067e830aeSAbhishek Sahu 		     NAND_BAM_NEXT_SGL);
1111c76b78d8SArchit Taneja }
1112c76b78d8SArchit Taneja 
1113c76b78d8SArchit Taneja /*
1114bde4330aSAbhishek Sahu  * Helper to prepare dma descriptors to configure registers needed for reading a
1115bde4330aSAbhishek Sahu  * single codeword in page
1116c76b78d8SArchit Taneja  */
1117bde4330aSAbhishek Sahu static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
1118bde4330aSAbhishek Sahu {
1119bde4330aSAbhishek Sahu 	config_nand_page_read(nandc);
1120bde4330aSAbhishek Sahu 	config_nand_cw_read(nandc);
1121bde4330aSAbhishek Sahu }
1122bde4330aSAbhishek Sahu 
112377cc5364SAbhishek Sahu /*
112477cc5364SAbhishek Sahu  * Helper to prepare DMA descriptors used to configure registers needed for
112577cc5364SAbhishek Sahu  * before writing a NAND page.
112677cc5364SAbhishek Sahu  */
112777cc5364SAbhishek Sahu static void config_nand_page_write(struct qcom_nand_controller *nandc)
1128c76b78d8SArchit Taneja {
112967e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
113067e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
113167e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
113267e830aeSAbhishek Sahu 		      NAND_BAM_NEXT_SGL);
1133c76b78d8SArchit Taneja }
1134c76b78d8SArchit Taneja 
113577cc5364SAbhishek Sahu /*
113677cc5364SAbhishek Sahu  * Helper to prepare DMA descriptors for configuring registers
113777cc5364SAbhishek Sahu  * before writing each codeword in NAND page.
113877cc5364SAbhishek Sahu  */
113977cc5364SAbhishek Sahu static void config_nand_cw_write(struct qcom_nand_controller *nandc)
1140c76b78d8SArchit Taneja {
114167e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
114267e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1143c76b78d8SArchit Taneja 
114467e830aeSAbhishek Sahu 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
1145c76b78d8SArchit Taneja 
114667e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
114767e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
1148c76b78d8SArchit Taneja }
1149c76b78d8SArchit Taneja 
1150c76b78d8SArchit Taneja /*
1151c76b78d8SArchit Taneja  * the following functions are used within chip->cmdfunc() to perform different
1152c76b78d8SArchit Taneja  * NAND_CMD_* commands
1153c76b78d8SArchit Taneja  */
1154c76b78d8SArchit Taneja 
1155c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_PARAM */
1156c76b78d8SArchit Taneja static int nandc_param(struct qcom_nand_host *host)
1157c76b78d8SArchit Taneja {
1158c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1159c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1160c76b78d8SArchit Taneja 
1161c76b78d8SArchit Taneja 	/*
1162c76b78d8SArchit Taneja 	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
1163c76b78d8SArchit Taneja 	 * in use. we configure the controller to perform a raw read of 512
1164c76b78d8SArchit Taneja 	 * bytes to read onfi params
1165c76b78d8SArchit Taneja 	 */
1166c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
1167c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR0, 0);
1168c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR1, 0);
1169c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
1170c76b78d8SArchit Taneja 					| 512 << UD_SIZE_BYTES
1171c76b78d8SArchit Taneja 					| 5 << NUM_ADDR_CYCLES
1172c76b78d8SArchit Taneja 					| 0 << SPARE_SIZE_BYTES);
1173c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
1174c76b78d8SArchit Taneja 					| 0 << CS_ACTIVE_BSY
1175c76b78d8SArchit Taneja 					| 17 << BAD_BLOCK_BYTE_NUM
1176c76b78d8SArchit Taneja 					| 1 << BAD_BLOCK_IN_SPARE_AREA
1177c76b78d8SArchit Taneja 					| 2 << WR_RD_BSY_GAP
1178c76b78d8SArchit Taneja 					| 0 << WIDE_FLASH
1179c76b78d8SArchit Taneja 					| 1 << DEV0_CFG1_ECC_DISABLE);
1180c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
1181c76b78d8SArchit Taneja 
1182c76b78d8SArchit Taneja 	/* configure CMD1 and VLD for ONFI param probing */
1183c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
1184d8a9b320SAbhishek Sahu 		      (nandc->vld & ~READ_START_VLD));
1185c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV_CMD1,
1186c76b78d8SArchit Taneja 		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
1187c76b78d8SArchit Taneja 		      | NAND_CMD_PARAM << READ_ADDR);
1188c76b78d8SArchit Taneja 
1189c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
1190c76b78d8SArchit Taneja 
1191c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
1192c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
119391af95c1SAbhishek Sahu 	nandc_set_read_loc(nandc, 0, 0, 512, 1);
1194c76b78d8SArchit Taneja 
119567e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
119667e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
1197c76b78d8SArchit Taneja 
1198c76b78d8SArchit Taneja 	nandc->buf_count = 512;
1199c76b78d8SArchit Taneja 	memset(nandc->data_buffer, 0xff, nandc->buf_count);
1200c76b78d8SArchit Taneja 
1201bde4330aSAbhishek Sahu 	config_nand_single_cw_page_read(nandc);
1202c76b78d8SArchit Taneja 
1203c76b78d8SArchit Taneja 	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
120467e830aeSAbhishek Sahu 		      nandc->buf_count, 0);
1205c76b78d8SArchit Taneja 
1206c76b78d8SArchit Taneja 	/* restore CMD1 and VLD regs */
120767e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
120867e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
1209c76b78d8SArchit Taneja 
1210c76b78d8SArchit Taneja 	return 0;
1211c76b78d8SArchit Taneja }
1212c76b78d8SArchit Taneja 
1213c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_ERASE1 */
1214c76b78d8SArchit Taneja static int erase_block(struct qcom_nand_host *host, int page_addr)
1215c76b78d8SArchit Taneja {
1216c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1217c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1218c76b78d8SArchit Taneja 
1219c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_CMD,
1220c76b78d8SArchit Taneja 		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
1221c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
1222c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR1, 0);
1223c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_CFG0,
1224c76b78d8SArchit Taneja 		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
1225c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
1226c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
1227c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
1228c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
1229c76b78d8SArchit Taneja 
123067e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
123167e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
123267e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1233c76b78d8SArchit Taneja 
123467e830aeSAbhishek Sahu 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
1235c76b78d8SArchit Taneja 
123667e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
123767e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
1238c76b78d8SArchit Taneja 
1239c76b78d8SArchit Taneja 	return 0;
1240c76b78d8SArchit Taneja }
1241c76b78d8SArchit Taneja 
1242c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_READID */
1243c76b78d8SArchit Taneja static int read_id(struct qcom_nand_host *host, int column)
1244c76b78d8SArchit Taneja {
1245c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1246c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1247c76b78d8SArchit Taneja 
1248c76b78d8SArchit Taneja 	if (column == -1)
1249c76b78d8SArchit Taneja 		return 0;
1250c76b78d8SArchit Taneja 
1251c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
1252c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR0, column);
1253c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_ADDR1, 0);
12549d43f915SAbhishek Sahu 	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
12559d43f915SAbhishek Sahu 		      nandc->props->is_bam ? 0 : DM_EN);
1256c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
1257c76b78d8SArchit Taneja 
125867e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
125967e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1260c76b78d8SArchit Taneja 
126167e830aeSAbhishek Sahu 	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
1262c76b78d8SArchit Taneja 
1263c76b78d8SArchit Taneja 	return 0;
1264c76b78d8SArchit Taneja }
1265c76b78d8SArchit Taneja 
1266c76b78d8SArchit Taneja /* sets up descriptors for NAND_CMD_RESET */
1267c76b78d8SArchit Taneja static int reset(struct qcom_nand_host *host)
1268c76b78d8SArchit Taneja {
1269c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1270c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1271c76b78d8SArchit Taneja 
1272c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
1273c76b78d8SArchit Taneja 	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
1274c76b78d8SArchit Taneja 
127567e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
127667e830aeSAbhishek Sahu 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1277c76b78d8SArchit Taneja 
127867e830aeSAbhishek Sahu 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
1279c76b78d8SArchit Taneja 
1280c76b78d8SArchit Taneja 	return 0;
1281c76b78d8SArchit Taneja }
1282c76b78d8SArchit Taneja 
1283c76b78d8SArchit Taneja /* helpers to submit/free our list of dma descriptors */
1284c76b78d8SArchit Taneja static int submit_descs(struct qcom_nand_controller *nandc)
1285c76b78d8SArchit Taneja {
1286c76b78d8SArchit Taneja 	struct desc_info *desc;
1287c76b78d8SArchit Taneja 	dma_cookie_t cookie = 0;
1288381dd245SAbhishek Sahu 	struct bam_transaction *bam_txn = nandc->bam_txn;
1289381dd245SAbhishek Sahu 	int r;
1290381dd245SAbhishek Sahu 
1291381dd245SAbhishek Sahu 	if (nandc->props->is_bam) {
1292381dd245SAbhishek Sahu 		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
1293381dd245SAbhishek Sahu 			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
1294381dd245SAbhishek Sahu 			if (r)
1295381dd245SAbhishek Sahu 				return r;
1296381dd245SAbhishek Sahu 		}
1297381dd245SAbhishek Sahu 
1298381dd245SAbhishek Sahu 		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
1299381dd245SAbhishek Sahu 			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
1300381dd245SAbhishek Sahu 						   DMA_PREP_INTERRUPT);
1301381dd245SAbhishek Sahu 			if (r)
1302381dd245SAbhishek Sahu 				return r;
1303381dd245SAbhishek Sahu 		}
1304381dd245SAbhishek Sahu 
1305381dd245SAbhishek Sahu 		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
13068d6b6d7eSAbhishek Sahu 			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
13078d6b6d7eSAbhishek Sahu 						   DMA_PREP_CMD);
1308381dd245SAbhishek Sahu 			if (r)
1309381dd245SAbhishek Sahu 				return r;
1310381dd245SAbhishek Sahu 		}
1311381dd245SAbhishek Sahu 	}
1312c76b78d8SArchit Taneja 
1313c76b78d8SArchit Taneja 	list_for_each_entry(desc, &nandc->desc_list, node)
1314c76b78d8SArchit Taneja 		cookie = dmaengine_submit(desc->dma_desc);
1315c76b78d8SArchit Taneja 
1316381dd245SAbhishek Sahu 	if (nandc->props->is_bam) {
13176f20070dSAbhishek Sahu 		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
13186f20070dSAbhishek Sahu 		bam_txn->last_cmd_desc->callback_param = bam_txn;
13196f20070dSAbhishek Sahu 		if (bam_txn->last_data_desc) {
13206f20070dSAbhishek Sahu 			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
13216f20070dSAbhishek Sahu 			bam_txn->last_data_desc->callback_param = bam_txn;
13226f20070dSAbhishek Sahu 			bam_txn->wait_second_completion = true;
13236f20070dSAbhishek Sahu 		}
13246f20070dSAbhishek Sahu 
1325381dd245SAbhishek Sahu 		dma_async_issue_pending(nandc->tx_chan);
1326381dd245SAbhishek Sahu 		dma_async_issue_pending(nandc->rx_chan);
13276f20070dSAbhishek Sahu 		dma_async_issue_pending(nandc->cmd_chan);
1328381dd245SAbhishek Sahu 
13296f20070dSAbhishek Sahu 		if (!wait_for_completion_timeout(&bam_txn->txn_done,
13306f20070dSAbhishek Sahu 						 QPIC_NAND_COMPLETION_TIMEOUT))
1331381dd245SAbhishek Sahu 			return -ETIMEDOUT;
1332381dd245SAbhishek Sahu 	} else {
1333c76b78d8SArchit Taneja 		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
1334c76b78d8SArchit Taneja 			return -ETIMEDOUT;
1335381dd245SAbhishek Sahu 	}
1336c76b78d8SArchit Taneja 
1337c76b78d8SArchit Taneja 	return 0;
1338c76b78d8SArchit Taneja }
1339c76b78d8SArchit Taneja 
1340c76b78d8SArchit Taneja static void free_descs(struct qcom_nand_controller *nandc)
1341c76b78d8SArchit Taneja {
1342c76b78d8SArchit Taneja 	struct desc_info *desc, *n;
1343c76b78d8SArchit Taneja 
1344c76b78d8SArchit Taneja 	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
1345c76b78d8SArchit Taneja 		list_del(&desc->node);
1346381dd245SAbhishek Sahu 
1347381dd245SAbhishek Sahu 		if (nandc->props->is_bam)
1348381dd245SAbhishek Sahu 			dma_unmap_sg(nandc->dev, desc->bam_sgl,
1349381dd245SAbhishek Sahu 				     desc->sgl_cnt, desc->dir);
1350381dd245SAbhishek Sahu 		else
1351381dd245SAbhishek Sahu 			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
1352381dd245SAbhishek Sahu 				     desc->dir);
1353381dd245SAbhishek Sahu 
1354c76b78d8SArchit Taneja 		kfree(desc);
1355c76b78d8SArchit Taneja 	}
1356c76b78d8SArchit Taneja }
1357c76b78d8SArchit Taneja 
1358c76b78d8SArchit Taneja /* reset the register read buffer for next NAND operation */
1359c76b78d8SArchit Taneja static void clear_read_regs(struct qcom_nand_controller *nandc)
1360c76b78d8SArchit Taneja {
1361c76b78d8SArchit Taneja 	nandc->reg_read_pos = 0;
13626192ff7aSAbhishek Sahu 	nandc_read_buffer_sync(nandc, false);
1363c76b78d8SArchit Taneja }
1364c76b78d8SArchit Taneja 
1365c76b78d8SArchit Taneja static void pre_command(struct qcom_nand_host *host, int command)
1366c76b78d8SArchit Taneja {
1367c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1368c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1369c76b78d8SArchit Taneja 
1370c76b78d8SArchit Taneja 	nandc->buf_count = 0;
1371c76b78d8SArchit Taneja 	nandc->buf_start = 0;
1372c76b78d8SArchit Taneja 	host->use_ecc = false;
1373c76b78d8SArchit Taneja 	host->last_command = command;
1374c76b78d8SArchit Taneja 
1375c76b78d8SArchit Taneja 	clear_read_regs(nandc);
13764e2f6c52SAbhishek Sahu 
13774e2f6c52SAbhishek Sahu 	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
13784e2f6c52SAbhishek Sahu 	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
13794e2f6c52SAbhishek Sahu 		clear_bam_transaction(nandc);
1380c76b78d8SArchit Taneja }
1381c76b78d8SArchit Taneja 
1382c76b78d8SArchit Taneja /*
1383c76b78d8SArchit Taneja  * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
1384c76b78d8SArchit Taneja  * privately maintained status byte, this status byte can be read after
1385c76b78d8SArchit Taneja  * NAND_CMD_STATUS is called
1386c76b78d8SArchit Taneja  */
1387c76b78d8SArchit Taneja static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
1388c76b78d8SArchit Taneja {
1389c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1390c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1391c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1392c76b78d8SArchit Taneja 	int num_cw;
1393c76b78d8SArchit Taneja 	int i;
1394c76b78d8SArchit Taneja 
1395c76b78d8SArchit Taneja 	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
13966192ff7aSAbhishek Sahu 	nandc_read_buffer_sync(nandc, true);
1397c76b78d8SArchit Taneja 
1398c76b78d8SArchit Taneja 	for (i = 0; i < num_cw; i++) {
1399c76b78d8SArchit Taneja 		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
1400c76b78d8SArchit Taneja 
1401c76b78d8SArchit Taneja 		if (flash_status & FS_MPU_ERR)
1402c76b78d8SArchit Taneja 			host->status &= ~NAND_STATUS_WP;
1403c76b78d8SArchit Taneja 
1404c76b78d8SArchit Taneja 		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
1405c76b78d8SArchit Taneja 						 (flash_status &
1406c76b78d8SArchit Taneja 						  FS_DEVICE_STS_ERR)))
1407c76b78d8SArchit Taneja 			host->status |= NAND_STATUS_FAIL;
1408c76b78d8SArchit Taneja 	}
1409c76b78d8SArchit Taneja }
1410c76b78d8SArchit Taneja 
1411c76b78d8SArchit Taneja static void post_command(struct qcom_nand_host *host, int command)
1412c76b78d8SArchit Taneja {
1413c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1414c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1415c76b78d8SArchit Taneja 
1416c76b78d8SArchit Taneja 	switch (command) {
1417c76b78d8SArchit Taneja 	case NAND_CMD_READID:
14186192ff7aSAbhishek Sahu 		nandc_read_buffer_sync(nandc, true);
1419c76b78d8SArchit Taneja 		memcpy(nandc->data_buffer, nandc->reg_read_buf,
1420c76b78d8SArchit Taneja 		       nandc->buf_count);
1421c76b78d8SArchit Taneja 		break;
1422c76b78d8SArchit Taneja 	case NAND_CMD_PAGEPROG:
1423c76b78d8SArchit Taneja 	case NAND_CMD_ERASE1:
1424c76b78d8SArchit Taneja 		parse_erase_write_errors(host, command);
1425c76b78d8SArchit Taneja 		break;
1426c76b78d8SArchit Taneja 	default:
1427c76b78d8SArchit Taneja 		break;
1428c76b78d8SArchit Taneja 	}
1429c76b78d8SArchit Taneja }
1430c76b78d8SArchit Taneja 
1431c76b78d8SArchit Taneja /*
1432c76b78d8SArchit Taneja  * Implements chip->cmdfunc. It's  only used for a limited set of commands.
1433c76b78d8SArchit Taneja  * The rest of the commands wouldn't be called by upper layers. For example,
1434c76b78d8SArchit Taneja  * NAND_CMD_READOOB would never be called because we have our own versions
1435c76b78d8SArchit Taneja  * of read_oob ops for nand_ecc_ctrl.
1436c76b78d8SArchit Taneja  */
1437c76b78d8SArchit Taneja static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
1438c76b78d8SArchit Taneja 			       int column, int page_addr)
1439c76b78d8SArchit Taneja {
1440c76b78d8SArchit Taneja 	struct nand_chip *chip = mtd_to_nand(mtd);
1441c76b78d8SArchit Taneja 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1442c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1443c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1444c76b78d8SArchit Taneja 	bool wait = false;
1445c76b78d8SArchit Taneja 	int ret = 0;
1446c76b78d8SArchit Taneja 
1447c76b78d8SArchit Taneja 	pre_command(host, command);
1448c76b78d8SArchit Taneja 
1449c76b78d8SArchit Taneja 	switch (command) {
1450c76b78d8SArchit Taneja 	case NAND_CMD_RESET:
1451c76b78d8SArchit Taneja 		ret = reset(host);
1452c76b78d8SArchit Taneja 		wait = true;
1453c76b78d8SArchit Taneja 		break;
1454c76b78d8SArchit Taneja 
1455c76b78d8SArchit Taneja 	case NAND_CMD_READID:
1456c76b78d8SArchit Taneja 		nandc->buf_count = 4;
1457c76b78d8SArchit Taneja 		ret = read_id(host, column);
1458c76b78d8SArchit Taneja 		wait = true;
1459c76b78d8SArchit Taneja 		break;
1460c76b78d8SArchit Taneja 
1461c76b78d8SArchit Taneja 	case NAND_CMD_PARAM:
1462c76b78d8SArchit Taneja 		ret = nandc_param(host);
1463c76b78d8SArchit Taneja 		wait = true;
1464c76b78d8SArchit Taneja 		break;
1465c76b78d8SArchit Taneja 
1466c76b78d8SArchit Taneja 	case NAND_CMD_ERASE1:
1467c76b78d8SArchit Taneja 		ret = erase_block(host, page_addr);
1468c76b78d8SArchit Taneja 		wait = true;
1469c76b78d8SArchit Taneja 		break;
1470c76b78d8SArchit Taneja 
1471c76b78d8SArchit Taneja 	case NAND_CMD_READ0:
1472c76b78d8SArchit Taneja 		/* we read the entire page for now */
1473c76b78d8SArchit Taneja 		WARN_ON(column != 0);
1474c76b78d8SArchit Taneja 
1475c76b78d8SArchit Taneja 		host->use_ecc = true;
1476c76b78d8SArchit Taneja 		set_address(host, 0, page_addr);
1477c76b78d8SArchit Taneja 		update_rw_regs(host, ecc->steps, true);
1478c76b78d8SArchit Taneja 		break;
1479c76b78d8SArchit Taneja 
1480c76b78d8SArchit Taneja 	case NAND_CMD_SEQIN:
1481c76b78d8SArchit Taneja 		WARN_ON(column != 0);
1482c76b78d8SArchit Taneja 		set_address(host, 0, page_addr);
1483c76b78d8SArchit Taneja 		break;
1484c76b78d8SArchit Taneja 
1485c76b78d8SArchit Taneja 	case NAND_CMD_PAGEPROG:
1486c76b78d8SArchit Taneja 	case NAND_CMD_STATUS:
1487c76b78d8SArchit Taneja 	case NAND_CMD_NONE:
1488c76b78d8SArchit Taneja 	default:
1489c76b78d8SArchit Taneja 		break;
1490c76b78d8SArchit Taneja 	}
1491c76b78d8SArchit Taneja 
1492c76b78d8SArchit Taneja 	if (ret) {
1493c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failure executing command %d\n",
1494c76b78d8SArchit Taneja 			command);
1495c76b78d8SArchit Taneja 		free_descs(nandc);
1496c76b78d8SArchit Taneja 		return;
1497c76b78d8SArchit Taneja 	}
1498c76b78d8SArchit Taneja 
1499c76b78d8SArchit Taneja 	if (wait) {
1500c76b78d8SArchit Taneja 		ret = submit_descs(nandc);
1501c76b78d8SArchit Taneja 		if (ret)
1502c76b78d8SArchit Taneja 			dev_err(nandc->dev,
1503c76b78d8SArchit Taneja 				"failure submitting descs for command %d\n",
1504c76b78d8SArchit Taneja 				command);
1505c76b78d8SArchit Taneja 	}
1506c76b78d8SArchit Taneja 
1507c76b78d8SArchit Taneja 	free_descs(nandc);
1508c76b78d8SArchit Taneja 
1509c76b78d8SArchit Taneja 	post_command(host, command);
1510c76b78d8SArchit Taneja }
1511c76b78d8SArchit Taneja 
1512c76b78d8SArchit Taneja /*
1513c76b78d8SArchit Taneja  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
1514c76b78d8SArchit Taneja  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1515c76b78d8SArchit Taneja  *
1516c76b78d8SArchit Taneja  * when using RS ECC, the HW reports the same erros when reading an erased CW,
1517c76b78d8SArchit Taneja  * but it notifies that it is an erased CW by placing special characters at
1518c76b78d8SArchit Taneja  * certain offsets in the buffer.
1519c76b78d8SArchit Taneja  *
1520c76b78d8SArchit Taneja  * verify if the page is erased or not, and fix up the page for RS ECC by
1521c76b78d8SArchit Taneja  * replacing the special characters with 0xff.
1522c76b78d8SArchit Taneja  */
1523c76b78d8SArchit Taneja static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1524c76b78d8SArchit Taneja {
1525c76b78d8SArchit Taneja 	u8 empty1, empty2;
1526c76b78d8SArchit Taneja 
1527c76b78d8SArchit Taneja 	/*
1528c76b78d8SArchit Taneja 	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
1529c76b78d8SArchit Taneja 	 * is erased by looking for 0x54s at offsets 3 and 175 from the
1530c76b78d8SArchit Taneja 	 * beginning of each codeword
1531c76b78d8SArchit Taneja 	 */
1532c76b78d8SArchit Taneja 
1533c76b78d8SArchit Taneja 	empty1 = data_buf[3];
1534c76b78d8SArchit Taneja 	empty2 = data_buf[175];
1535c76b78d8SArchit Taneja 
1536c76b78d8SArchit Taneja 	/*
1537c76b78d8SArchit Taneja 	 * if the erased codework markers, if they exist override them with
1538c76b78d8SArchit Taneja 	 * 0xffs
1539c76b78d8SArchit Taneja 	 */
1540c76b78d8SArchit Taneja 	if ((empty1 == 0x54 && empty2 == 0xff) ||
1541c76b78d8SArchit Taneja 	    (empty1 == 0xff && empty2 == 0x54)) {
1542c76b78d8SArchit Taneja 		data_buf[3] = 0xff;
1543c76b78d8SArchit Taneja 		data_buf[175] = 0xff;
1544c76b78d8SArchit Taneja 	}
1545c76b78d8SArchit Taneja 
1546c76b78d8SArchit Taneja 	/*
1547c76b78d8SArchit Taneja 	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
1548c76b78d8SArchit Taneja 	 * restore the original values at the special offsets
1549c76b78d8SArchit Taneja 	 */
1550c76b78d8SArchit Taneja 	if (memchr_inv(data_buf, 0xff, data_len)) {
1551c76b78d8SArchit Taneja 		data_buf[3] = empty1;
1552c76b78d8SArchit Taneja 		data_buf[175] = empty2;
1553c76b78d8SArchit Taneja 
1554c76b78d8SArchit Taneja 		return false;
1555c76b78d8SArchit Taneja 	}
1556c76b78d8SArchit Taneja 
1557c76b78d8SArchit Taneja 	return true;
1558c76b78d8SArchit Taneja }
1559c76b78d8SArchit Taneja 
1560c76b78d8SArchit Taneja struct read_stats {
1561c76b78d8SArchit Taneja 	__le32 flash;
1562c76b78d8SArchit Taneja 	__le32 buffer;
1563c76b78d8SArchit Taneja 	__le32 erased_cw;
1564c76b78d8SArchit Taneja };
1565c76b78d8SArchit Taneja 
1566c76b78d8SArchit Taneja /*
1567c76b78d8SArchit Taneja  * reads back status registers set by the controller to notify page read
1568c76b78d8SArchit Taneja  * errors. this is equivalent to what 'ecc->correct()' would do.
1569c76b78d8SArchit Taneja  */
1570c76b78d8SArchit Taneja static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1571c76b78d8SArchit Taneja 			     u8 *oob_buf)
1572c76b78d8SArchit Taneja {
1573c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1574c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1575c76b78d8SArchit Taneja 	struct mtd_info *mtd = nand_to_mtd(chip);
1576c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1577c76b78d8SArchit Taneja 	unsigned int max_bitflips = 0;
1578c76b78d8SArchit Taneja 	struct read_stats *buf;
15798eab7214SAbhishek Sahu 	bool flash_op_err = false;
1580c76b78d8SArchit Taneja 	int i;
1581c76b78d8SArchit Taneja 
1582c76b78d8SArchit Taneja 	buf = (struct read_stats *)nandc->reg_read_buf;
15836192ff7aSAbhishek Sahu 	nandc_read_buffer_sync(nandc, true);
1584c76b78d8SArchit Taneja 
1585c76b78d8SArchit Taneja 	for (i = 0; i < ecc->steps; i++, buf++) {
1586c76b78d8SArchit Taneja 		u32 flash, buffer, erased_cw;
1587c76b78d8SArchit Taneja 		int data_len, oob_len;
1588c76b78d8SArchit Taneja 
1589c76b78d8SArchit Taneja 		if (i == (ecc->steps - 1)) {
1590c76b78d8SArchit Taneja 			data_len = ecc->size - ((ecc->steps - 1) << 2);
1591c76b78d8SArchit Taneja 			oob_len = ecc->steps << 2;
1592c76b78d8SArchit Taneja 		} else {
1593c76b78d8SArchit Taneja 			data_len = host->cw_data;
1594c76b78d8SArchit Taneja 			oob_len = 0;
1595c76b78d8SArchit Taneja 		}
1596c76b78d8SArchit Taneja 
1597c76b78d8SArchit Taneja 		flash = le32_to_cpu(buf->flash);
1598c76b78d8SArchit Taneja 		buffer = le32_to_cpu(buf->buffer);
1599c76b78d8SArchit Taneja 		erased_cw = le32_to_cpu(buf->erased_cw);
1600c76b78d8SArchit Taneja 
16018eab7214SAbhishek Sahu 		/*
16028eab7214SAbhishek Sahu 		 * Check ECC failure for each codeword. ECC failure can
16038eab7214SAbhishek Sahu 		 * happen in either of the following conditions
16048eab7214SAbhishek Sahu 		 * 1. If number of bitflips are greater than ECC engine
16058eab7214SAbhishek Sahu 		 *    capability.
16068eab7214SAbhishek Sahu 		 * 2. If this codeword contains all 0xff for which erased
16078eab7214SAbhishek Sahu 		 *    codeword detection check will be done.
16088eab7214SAbhishek Sahu 		 */
16098eab7214SAbhishek Sahu 		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1610c76b78d8SArchit Taneja 			bool erased;
16118eab7214SAbhishek Sahu 			int ret, ecclen, extraooblen;
16128eab7214SAbhishek Sahu 			void *eccbuf;
1613c76b78d8SArchit Taneja 
1614*2f610386SAbhishek Sahu 			/*
1615*2f610386SAbhishek Sahu 			 * For BCH ECC, ignore erased codeword errors, if
1616*2f610386SAbhishek Sahu 			 * ERASED_CW bits are set.
1617*2f610386SAbhishek Sahu 			 */
1618c76b78d8SArchit Taneja 			if (host->bch_enabled) {
1619c76b78d8SArchit Taneja 				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
1620c76b78d8SArchit Taneja 					 true : false;
1621*2f610386SAbhishek Sahu 			/*
1622*2f610386SAbhishek Sahu 			 * For RS ECC, HW reports the erased CW by placing
1623*2f610386SAbhishek Sahu 			 * special characters at certain offsets in the buffer.
1624*2f610386SAbhishek Sahu 			 * These special characters will be valid only if
1625*2f610386SAbhishek Sahu 			 * complete page is read i.e. data_buf is not NULL.
1626*2f610386SAbhishek Sahu 			 */
1627*2f610386SAbhishek Sahu 			} else if (data_buf) {
1628c76b78d8SArchit Taneja 				erased = erased_chunk_check_and_fixup(data_buf,
1629c76b78d8SArchit Taneja 								      data_len);
1630*2f610386SAbhishek Sahu 			} else {
1631*2f610386SAbhishek Sahu 				erased = false;
1632c76b78d8SArchit Taneja 			}
1633c76b78d8SArchit Taneja 
1634c76b78d8SArchit Taneja 			if (erased) {
1635c76b78d8SArchit Taneja 				data_buf += data_len;
1636c76b78d8SArchit Taneja 				if (oob_buf)
1637c76b78d8SArchit Taneja 					oob_buf += oob_len + ecc->bytes;
1638c76b78d8SArchit Taneja 				continue;
1639c76b78d8SArchit Taneja 			}
1640c76b78d8SArchit Taneja 
1641c76b78d8SArchit Taneja 			eccbuf = oob_buf ? oob_buf + oob_len : NULL;
1642c76b78d8SArchit Taneja 			ecclen = oob_buf ? host->ecc_bytes_hw : 0;
1643c76b78d8SArchit Taneja 			extraooblen = oob_buf ? oob_len : 0;
1644c76b78d8SArchit Taneja 
1645c76b78d8SArchit Taneja 			/*
1646c76b78d8SArchit Taneja 			 * make sure it isn't an erased page reported
1647c76b78d8SArchit Taneja 			 * as not-erased by HW because of a few bitflips
1648c76b78d8SArchit Taneja 			 */
1649c76b78d8SArchit Taneja 			ret = nand_check_erased_ecc_chunk(data_buf,
1650c76b78d8SArchit Taneja 				data_len, eccbuf, ecclen, oob_buf,
1651c76b78d8SArchit Taneja 				extraooblen, ecc->strength);
1652c76b78d8SArchit Taneja 			if (ret < 0) {
1653c76b78d8SArchit Taneja 				mtd->ecc_stats.failed++;
1654c76b78d8SArchit Taneja 			} else {
1655c76b78d8SArchit Taneja 				mtd->ecc_stats.corrected += ret;
1656c76b78d8SArchit Taneja 				max_bitflips =
1657c76b78d8SArchit Taneja 					max_t(unsigned int, max_bitflips, ret);
1658c76b78d8SArchit Taneja 			}
16598eab7214SAbhishek Sahu 		/*
16608eab7214SAbhishek Sahu 		 * Check if MPU or any other operational error (timeout,
16618eab7214SAbhishek Sahu 		 * device failure, etc.) happened for this codeword and
16628eab7214SAbhishek Sahu 		 * make flash_op_err true. If flash_op_err is set, then
16638eab7214SAbhishek Sahu 		 * EIO will be returned for page read.
16648eab7214SAbhishek Sahu 		 */
16658eab7214SAbhishek Sahu 		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
16668eab7214SAbhishek Sahu 			flash_op_err = true;
16678eab7214SAbhishek Sahu 		/*
16688eab7214SAbhishek Sahu 		 * No ECC or operational errors happened. Check the number of
16698eab7214SAbhishek Sahu 		 * bits corrected and update the ecc_stats.corrected.
16708eab7214SAbhishek Sahu 		 */
1671c76b78d8SArchit Taneja 		} else {
1672c76b78d8SArchit Taneja 			unsigned int stat;
1673c76b78d8SArchit Taneja 
1674c76b78d8SArchit Taneja 			stat = buffer & BS_CORRECTABLE_ERR_MSK;
1675c76b78d8SArchit Taneja 			mtd->ecc_stats.corrected += stat;
1676c76b78d8SArchit Taneja 			max_bitflips = max(max_bitflips, stat);
1677c76b78d8SArchit Taneja 		}
1678c76b78d8SArchit Taneja 
1679*2f610386SAbhishek Sahu 		if (data_buf)
1680c76b78d8SArchit Taneja 			data_buf += data_len;
1681c76b78d8SArchit Taneja 		if (oob_buf)
1682c76b78d8SArchit Taneja 			oob_buf += oob_len + ecc->bytes;
1683c76b78d8SArchit Taneja 	}
1684c76b78d8SArchit Taneja 
16858eab7214SAbhishek Sahu 	if (flash_op_err)
16868eab7214SAbhishek Sahu 		return -EIO;
16878eab7214SAbhishek Sahu 
1688c76b78d8SArchit Taneja 	return max_bitflips;
1689c76b78d8SArchit Taneja }
1690c76b78d8SArchit Taneja 
1691c76b78d8SArchit Taneja /*
1692c76b78d8SArchit Taneja  * helper to perform the actual page read operation, used by ecc->read_page(),
1693c76b78d8SArchit Taneja  * ecc->read_oob()
1694c76b78d8SArchit Taneja  */
1695c76b78d8SArchit Taneja static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1696c76b78d8SArchit Taneja 			 u8 *oob_buf)
1697c76b78d8SArchit Taneja {
1698c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1699c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1700c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1701c76b78d8SArchit Taneja 	int i, ret;
1702c76b78d8SArchit Taneja 
1703bde4330aSAbhishek Sahu 	config_nand_page_read(nandc);
1704bde4330aSAbhishek Sahu 
1705c76b78d8SArchit Taneja 	/* queue cmd descs for each codeword */
1706c76b78d8SArchit Taneja 	for (i = 0; i < ecc->steps; i++) {
1707c76b78d8SArchit Taneja 		int data_size, oob_size;
1708c76b78d8SArchit Taneja 
1709c76b78d8SArchit Taneja 		if (i == (ecc->steps - 1)) {
1710c76b78d8SArchit Taneja 			data_size = ecc->size - ((ecc->steps - 1) << 2);
1711c76b78d8SArchit Taneja 			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1712c76b78d8SArchit Taneja 				   host->spare_bytes;
1713c76b78d8SArchit Taneja 		} else {
1714c76b78d8SArchit Taneja 			data_size = host->cw_data;
1715c76b78d8SArchit Taneja 			oob_size = host->ecc_bytes_hw + host->spare_bytes;
1716c76b78d8SArchit Taneja 		}
1717c76b78d8SArchit Taneja 
171891af95c1SAbhishek Sahu 		if (nandc->props->is_bam) {
171991af95c1SAbhishek Sahu 			if (data_buf && oob_buf) {
172091af95c1SAbhishek Sahu 				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
172191af95c1SAbhishek Sahu 				nandc_set_read_loc(nandc, 1, data_size,
172291af95c1SAbhishek Sahu 						   oob_size, 1);
172391af95c1SAbhishek Sahu 			} else if (data_buf) {
172491af95c1SAbhishek Sahu 				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
172591af95c1SAbhishek Sahu 			} else {
172691af95c1SAbhishek Sahu 				nandc_set_read_loc(nandc, 0, data_size,
172791af95c1SAbhishek Sahu 						   oob_size, 1);
172891af95c1SAbhishek Sahu 			}
172991af95c1SAbhishek Sahu 		}
173091af95c1SAbhishek Sahu 
1731bde4330aSAbhishek Sahu 		config_nand_cw_read(nandc);
1732c76b78d8SArchit Taneja 
1733c76b78d8SArchit Taneja 		if (data_buf)
1734c76b78d8SArchit Taneja 			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
173567e830aeSAbhishek Sahu 				      data_size, 0);
1736c76b78d8SArchit Taneja 
1737c76b78d8SArchit Taneja 		/*
1738c76b78d8SArchit Taneja 		 * when ecc is enabled, the controller doesn't read the real
1739c76b78d8SArchit Taneja 		 * or dummy bad block markers in each chunk. To maintain a
1740c76b78d8SArchit Taneja 		 * consistent layout across RAW and ECC reads, we just
1741c76b78d8SArchit Taneja 		 * leave the real/dummy BBM offsets empty (i.e, filled with
1742c76b78d8SArchit Taneja 		 * 0xffs)
1743c76b78d8SArchit Taneja 		 */
1744c76b78d8SArchit Taneja 		if (oob_buf) {
1745c76b78d8SArchit Taneja 			int j;
1746c76b78d8SArchit Taneja 
1747c76b78d8SArchit Taneja 			for (j = 0; j < host->bbm_size; j++)
1748c76b78d8SArchit Taneja 				*oob_buf++ = 0xff;
1749c76b78d8SArchit Taneja 
1750c76b78d8SArchit Taneja 			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
175167e830aeSAbhishek Sahu 				      oob_buf, oob_size, 0);
1752c76b78d8SArchit Taneja 		}
1753c76b78d8SArchit Taneja 
1754c76b78d8SArchit Taneja 		if (data_buf)
1755c76b78d8SArchit Taneja 			data_buf += data_size;
1756c76b78d8SArchit Taneja 		if (oob_buf)
1757c76b78d8SArchit Taneja 			oob_buf += oob_size;
1758c76b78d8SArchit Taneja 	}
1759c76b78d8SArchit Taneja 
1760c76b78d8SArchit Taneja 	ret = submit_descs(nandc);
1761c76b78d8SArchit Taneja 	if (ret)
1762c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failure to read page/oob\n");
1763c76b78d8SArchit Taneja 
1764c76b78d8SArchit Taneja 	free_descs(nandc);
1765c76b78d8SArchit Taneja 
1766c76b78d8SArchit Taneja 	return ret;
1767c76b78d8SArchit Taneja }
1768c76b78d8SArchit Taneja 
1769c76b78d8SArchit Taneja /*
1770c76b78d8SArchit Taneja  * a helper that copies the last step/codeword of a page (containing free oob)
1771c76b78d8SArchit Taneja  * into our local buffer
1772c76b78d8SArchit Taneja  */
1773c76b78d8SArchit Taneja static int copy_last_cw(struct qcom_nand_host *host, int page)
1774c76b78d8SArchit Taneja {
1775c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
1776c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1777c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1778c76b78d8SArchit Taneja 	int size;
1779c76b78d8SArchit Taneja 	int ret;
1780c76b78d8SArchit Taneja 
1781c76b78d8SArchit Taneja 	clear_read_regs(nandc);
1782c76b78d8SArchit Taneja 
1783c76b78d8SArchit Taneja 	size = host->use_ecc ? host->cw_data : host->cw_size;
1784c76b78d8SArchit Taneja 
1785c76b78d8SArchit Taneja 	/* prepare a clean read buffer */
1786c76b78d8SArchit Taneja 	memset(nandc->data_buffer, 0xff, size);
1787c76b78d8SArchit Taneja 
1788c76b78d8SArchit Taneja 	set_address(host, host->cw_size * (ecc->steps - 1), page);
1789c76b78d8SArchit Taneja 	update_rw_regs(host, 1, true);
1790c76b78d8SArchit Taneja 
1791bde4330aSAbhishek Sahu 	config_nand_single_cw_page_read(nandc);
1792c76b78d8SArchit Taneja 
179367e830aeSAbhishek Sahu 	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
1794c76b78d8SArchit Taneja 
1795c76b78d8SArchit Taneja 	ret = submit_descs(nandc);
1796c76b78d8SArchit Taneja 	if (ret)
1797c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failed to copy last codeword\n");
1798c76b78d8SArchit Taneja 
1799c76b78d8SArchit Taneja 	free_descs(nandc);
1800c76b78d8SArchit Taneja 
1801c76b78d8SArchit Taneja 	return ret;
1802c76b78d8SArchit Taneja }
1803c76b78d8SArchit Taneja 
1804c76b78d8SArchit Taneja /* implements ecc->read_page() */
1805c76b78d8SArchit Taneja static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1806c76b78d8SArchit Taneja 				uint8_t *buf, int oob_required, int page)
1807c76b78d8SArchit Taneja {
1808c76b78d8SArchit Taneja 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1809c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1810c76b78d8SArchit Taneja 	u8 *data_buf, *oob_buf = NULL;
1811c76b78d8SArchit Taneja 	int ret;
1812c76b78d8SArchit Taneja 
181325f815f6SBoris Brezillon 	nand_read_page_op(chip, page, 0, NULL, 0);
1814c76b78d8SArchit Taneja 	data_buf = buf;
1815c76b78d8SArchit Taneja 	oob_buf = oob_required ? chip->oob_poi : NULL;
1816c76b78d8SArchit Taneja 
18174e2f6c52SAbhishek Sahu 	clear_bam_transaction(nandc);
1818c76b78d8SArchit Taneja 	ret = read_page_ecc(host, data_buf, oob_buf);
1819c76b78d8SArchit Taneja 	if (ret) {
1820c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failure to read page\n");
1821c76b78d8SArchit Taneja 		return ret;
1822c76b78d8SArchit Taneja 	}
1823c76b78d8SArchit Taneja 
1824c76b78d8SArchit Taneja 	return parse_read_errors(host, data_buf, oob_buf);
1825c76b78d8SArchit Taneja }
1826c76b78d8SArchit Taneja 
1827c76b78d8SArchit Taneja /* implements ecc->read_page_raw() */
1828c76b78d8SArchit Taneja static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1829c76b78d8SArchit Taneja 				    struct nand_chip *chip, uint8_t *buf,
1830c76b78d8SArchit Taneja 				    int oob_required, int page)
1831c76b78d8SArchit Taneja {
1832c76b78d8SArchit Taneja 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1833c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1834c76b78d8SArchit Taneja 	u8 *data_buf, *oob_buf;
1835c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1836c76b78d8SArchit Taneja 	int i, ret;
183791af95c1SAbhishek Sahu 	int read_loc;
1838c76b78d8SArchit Taneja 
183925f815f6SBoris Brezillon 	nand_read_page_op(chip, page, 0, NULL, 0);
1840c76b78d8SArchit Taneja 	data_buf = buf;
1841c76b78d8SArchit Taneja 	oob_buf = chip->oob_poi;
1842c76b78d8SArchit Taneja 
1843c76b78d8SArchit Taneja 	host->use_ecc = false;
18444e2f6c52SAbhishek Sahu 
18454e2f6c52SAbhishek Sahu 	clear_bam_transaction(nandc);
1846c76b78d8SArchit Taneja 	update_rw_regs(host, ecc->steps, true);
1847bde4330aSAbhishek Sahu 	config_nand_page_read(nandc);
1848c76b78d8SArchit Taneja 
1849c76b78d8SArchit Taneja 	for (i = 0; i < ecc->steps; i++) {
1850c76b78d8SArchit Taneja 		int data_size1, data_size2, oob_size1, oob_size2;
1851c76b78d8SArchit Taneja 		int reg_off = FLASH_BUF_ACC;
1852c76b78d8SArchit Taneja 
1853c76b78d8SArchit Taneja 		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1854c76b78d8SArchit Taneja 		oob_size1 = host->bbm_size;
1855c76b78d8SArchit Taneja 
1856c76b78d8SArchit Taneja 		if (i == (ecc->steps - 1)) {
1857c76b78d8SArchit Taneja 			data_size2 = ecc->size - data_size1 -
1858c76b78d8SArchit Taneja 				     ((ecc->steps - 1) << 2);
1859c76b78d8SArchit Taneja 			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1860c76b78d8SArchit Taneja 				    host->spare_bytes;
1861c76b78d8SArchit Taneja 		} else {
1862c76b78d8SArchit Taneja 			data_size2 = host->cw_data - data_size1;
1863c76b78d8SArchit Taneja 			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1864c76b78d8SArchit Taneja 		}
1865c76b78d8SArchit Taneja 
186691af95c1SAbhishek Sahu 		if (nandc->props->is_bam) {
186791af95c1SAbhishek Sahu 			read_loc = 0;
186891af95c1SAbhishek Sahu 			nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
186991af95c1SAbhishek Sahu 			read_loc += data_size1;
187091af95c1SAbhishek Sahu 
187191af95c1SAbhishek Sahu 			nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
187291af95c1SAbhishek Sahu 			read_loc += oob_size1;
187391af95c1SAbhishek Sahu 
187491af95c1SAbhishek Sahu 			nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
187591af95c1SAbhishek Sahu 			read_loc += data_size2;
187691af95c1SAbhishek Sahu 
187791af95c1SAbhishek Sahu 			nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
187891af95c1SAbhishek Sahu 		}
187991af95c1SAbhishek Sahu 
1880bde4330aSAbhishek Sahu 		config_nand_cw_read(nandc);
1881c76b78d8SArchit Taneja 
188267e830aeSAbhishek Sahu 		read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1883c76b78d8SArchit Taneja 		reg_off += data_size1;
1884c76b78d8SArchit Taneja 		data_buf += data_size1;
1885c76b78d8SArchit Taneja 
188667e830aeSAbhishek Sahu 		read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1887c76b78d8SArchit Taneja 		reg_off += oob_size1;
1888c76b78d8SArchit Taneja 		oob_buf += oob_size1;
1889c76b78d8SArchit Taneja 
189067e830aeSAbhishek Sahu 		read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
1891c76b78d8SArchit Taneja 		reg_off += data_size2;
1892c76b78d8SArchit Taneja 		data_buf += data_size2;
1893c76b78d8SArchit Taneja 
189467e830aeSAbhishek Sahu 		read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
1895c76b78d8SArchit Taneja 		oob_buf += oob_size2;
1896c76b78d8SArchit Taneja 	}
1897c76b78d8SArchit Taneja 
1898c76b78d8SArchit Taneja 	ret = submit_descs(nandc);
1899c76b78d8SArchit Taneja 	if (ret)
1900c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failure to read raw page\n");
1901c76b78d8SArchit Taneja 
1902c76b78d8SArchit Taneja 	free_descs(nandc);
1903c76b78d8SArchit Taneja 
1904c76b78d8SArchit Taneja 	return 0;
1905c76b78d8SArchit Taneja }
1906c76b78d8SArchit Taneja 
1907c76b78d8SArchit Taneja /* implements ecc->read_oob() */
1908c76b78d8SArchit Taneja static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1909c76b78d8SArchit Taneja 			       int page)
1910c76b78d8SArchit Taneja {
1911c76b78d8SArchit Taneja 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1912c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1913c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1914c76b78d8SArchit Taneja 	int ret;
1915c76b78d8SArchit Taneja 
1916c76b78d8SArchit Taneja 	clear_read_regs(nandc);
19174e2f6c52SAbhishek Sahu 	clear_bam_transaction(nandc);
1918c76b78d8SArchit Taneja 
1919c76b78d8SArchit Taneja 	host->use_ecc = true;
1920c76b78d8SArchit Taneja 	set_address(host, 0, page);
1921c76b78d8SArchit Taneja 	update_rw_regs(host, ecc->steps, true);
1922c76b78d8SArchit Taneja 
1923c76b78d8SArchit Taneja 	ret = read_page_ecc(host, NULL, chip->oob_poi);
1924c76b78d8SArchit Taneja 	if (ret)
1925c76b78d8SArchit Taneja 		dev_err(nandc->dev, "failure to read oob\n");
1926c76b78d8SArchit Taneja 
1927c76b78d8SArchit Taneja 	return ret;
1928c76b78d8SArchit Taneja }
1929c76b78d8SArchit Taneja 
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	/* issue the PROG PAGE begin (command + address cycles) up front */
	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	/* queue the DMA descriptors, one codeword per iteration */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			/*
			 * last codeword: 4 data bytes per earlier step have
			 * been pushed into it, and its oob covers the free
			 * area plus the HW-generated ECC bytes
			 */
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	/* finish with PROG PAGE confirm and wait for the program to end */
	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
1999c76b78d8SArchit Taneja 
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	/* raw write: the controller's ECC engine is bypassed */
	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/*
		 * each raw codeword is sent as four chunks:
		 * data1 | bbm byte(s) | data2 | spare + ECC bytes
		 * (see the "Layout with ECC disabled" diagram in this file)
		 */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			/* last codeword also carries the spare/free area */
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* NAND_BAM_NO_EOT: more chunks of this codeword follow */
		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		/* final chunk of the codeword: no NO_EOT flag */
		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	/* confirm the program operation only if the transfer succeeded */
	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
2071c76b78d8SArchit Taneja 
2072c76b78d8SArchit Taneja /*
2073c76b78d8SArchit Taneja  * implements ecc->write_oob()
2074c76b78d8SArchit Taneja  *
2075c76b78d8SArchit Taneja  * the NAND controller cannot write only data or only oob within a codeword,
2076c76b78d8SArchit Taneja  * since ecc is calculated for the combined codeword. we first copy the
2077c76b78d8SArchit Taneja  * entire contents for the last codeword(data + oob), replace the old oob
2078c76b78d8SArchit Taneja  * with the new one in chip->oob_poi, and then write the entire codeword.
2079c76b78d8SArchit Taneja  * this read-copy-write operation results in a slight performance loss.
2080c76b78d8SArchit Taneja  */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;

	/* read the current last codeword (data + oob) into data_buffer */
	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	/* write the patched codeword back to its original position */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* issue PROG PAGE confirm and wait for the program to complete */
	return nand_prog_page_end_op(chip);
}
2128c76b78d8SArchit Taneja 
2129c76b78d8SArchit Taneja static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
2130c76b78d8SArchit Taneja {
2131c76b78d8SArchit Taneja 	struct nand_chip *chip = mtd_to_nand(mtd);
2132c76b78d8SArchit Taneja 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2133c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2134c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2135c76b78d8SArchit Taneja 	int page, ret, bbpos, bad = 0;
2136c76b78d8SArchit Taneja 	u32 flash_status;
2137c76b78d8SArchit Taneja 
2138c76b78d8SArchit Taneja 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2139c76b78d8SArchit Taneja 
2140c76b78d8SArchit Taneja 	/*
2141c76b78d8SArchit Taneja 	 * configure registers for a raw sub page read, the address is set to
2142c76b78d8SArchit Taneja 	 * the beginning of the last codeword, we don't care about reading ecc
2143c76b78d8SArchit Taneja 	 * portion of oob. we just want the first few bytes from this codeword
2144c76b78d8SArchit Taneja 	 * that contains the BBM
2145c76b78d8SArchit Taneja 	 */
2146c76b78d8SArchit Taneja 	host->use_ecc = false;
2147c76b78d8SArchit Taneja 
21484e2f6c52SAbhishek Sahu 	clear_bam_transaction(nandc);
2149c76b78d8SArchit Taneja 	ret = copy_last_cw(host, page);
2150c76b78d8SArchit Taneja 	if (ret)
2151c76b78d8SArchit Taneja 		goto err;
2152c76b78d8SArchit Taneja 
2153c76b78d8SArchit Taneja 	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
2154c76b78d8SArchit Taneja 
2155c76b78d8SArchit Taneja 	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
2156c76b78d8SArchit Taneja 		dev_warn(nandc->dev, "error when trying to read BBM\n");
2157c76b78d8SArchit Taneja 		goto err;
2158c76b78d8SArchit Taneja 	}
2159c76b78d8SArchit Taneja 
2160c76b78d8SArchit Taneja 	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2161c76b78d8SArchit Taneja 
2162c76b78d8SArchit Taneja 	bad = nandc->data_buffer[bbpos] != 0xff;
2163c76b78d8SArchit Taneja 
2164c76b78d8SArchit Taneja 	if (chip->options & NAND_BUSWIDTH_16)
2165c76b78d8SArchit Taneja 		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2166c76b78d8SArchit Taneja err:
2167c76b78d8SArchit Taneja 	return bad;
2168c76b78d8SArchit Taneja }
2169c76b78d8SArchit Taneja 
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write: raw (no ECC), targeting only the last codeword */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	/* confirm the program operation and wait for completion */
	return nand_prog_page_end_op(chip);
}
2211c76b78d8SArchit Taneja 
2212c76b78d8SArchit Taneja /*
2213c76b78d8SArchit Taneja  * the three functions below implement chip->read_byte(), chip->read_buf()
2214c76b78d8SArchit Taneja  * and chip->write_buf() respectively. these aren't used for
2215c76b78d8SArchit Taneja  * reading/writing page data, they are used for smaller data like reading
2216c76b78d8SArchit Taneja  * id, status etc
2217c76b78d8SArchit Taneja  */
2218c76b78d8SArchit Taneja static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2219c76b78d8SArchit Taneja {
2220c76b78d8SArchit Taneja 	struct nand_chip *chip = mtd_to_nand(mtd);
2221c76b78d8SArchit Taneja 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2222c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2223c76b78d8SArchit Taneja 	u8 *buf = nandc->data_buffer;
2224c76b78d8SArchit Taneja 	u8 ret = 0x0;
2225c76b78d8SArchit Taneja 
2226c76b78d8SArchit Taneja 	if (host->last_command == NAND_CMD_STATUS) {
2227c76b78d8SArchit Taneja 		ret = host->status;
2228c76b78d8SArchit Taneja 
2229c76b78d8SArchit Taneja 		host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2230c76b78d8SArchit Taneja 
2231c76b78d8SArchit Taneja 		return ret;
2232c76b78d8SArchit Taneja 	}
2233c76b78d8SArchit Taneja 
2234c76b78d8SArchit Taneja 	if (nandc->buf_start < nandc->buf_count)
2235c76b78d8SArchit Taneja 		ret = buf[nandc->buf_start++];
2236c76b78d8SArchit Taneja 
2237c76b78d8SArchit Taneja 	return ret;
2238c76b78d8SArchit Taneja }
2239c76b78d8SArchit Taneja 
2240c76b78d8SArchit Taneja static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2241c76b78d8SArchit Taneja {
2242c76b78d8SArchit Taneja 	struct nand_chip *chip = mtd_to_nand(mtd);
2243c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2244c76b78d8SArchit Taneja 	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2245c76b78d8SArchit Taneja 
2246c76b78d8SArchit Taneja 	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2247c76b78d8SArchit Taneja 	nandc->buf_start += real_len;
2248c76b78d8SArchit Taneja }
2249c76b78d8SArchit Taneja 
2250c76b78d8SArchit Taneja static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2251c76b78d8SArchit Taneja 				 int len)
2252c76b78d8SArchit Taneja {
2253c76b78d8SArchit Taneja 	struct nand_chip *chip = mtd_to_nand(mtd);
2254c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2255c76b78d8SArchit Taneja 	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2256c76b78d8SArchit Taneja 
2257c76b78d8SArchit Taneja 	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2258c76b78d8SArchit Taneja 
2259c76b78d8SArchit Taneja 	nandc->buf_start += real_len;
2260c76b78d8SArchit Taneja }
2261c76b78d8SArchit Taneja 
2262c76b78d8SArchit Taneja /* we support only one external chip for now */
2263c76b78d8SArchit Taneja static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
2264c76b78d8SArchit Taneja {
2265c76b78d8SArchit Taneja 	struct nand_chip *chip = mtd_to_nand(mtd);
2266c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2267c76b78d8SArchit Taneja 
2268c76b78d8SArchit Taneja 	if (chipnr <= 0)
2269c76b78d8SArchit Taneja 		return;
2270c76b78d8SArchit Taneja 
2271c76b78d8SArchit Taneja 	dev_warn(nandc->dev, "invalid chip select\n");
2272c76b78d8SArchit Taneja }
2273c76b78d8SArchit Taneja 
2274c76b78d8SArchit Taneja /*
2275c76b78d8SArchit Taneja  * NAND controller page layout info
2276c76b78d8SArchit Taneja  *
2277c76b78d8SArchit Taneja  * Layout with ECC enabled:
2278c76b78d8SArchit Taneja  *
2279c76b78d8SArchit Taneja  * |----------------------|  |---------------------------------|
2280c76b78d8SArchit Taneja  * |           xx.......yy|  |             *********xx.......yy|
2281c76b78d8SArchit Taneja  * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2282c76b78d8SArchit Taneja  * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2283c76b78d8SArchit Taneja  * |           xx.......yy|  |             *********xx.......yy|
2284c76b78d8SArchit Taneja  * |----------------------|  |---------------------------------|
2285c76b78d8SArchit Taneja  *     codeword 1,2..n-1                  codeword n
2286c76b78d8SArchit Taneja  *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2287c76b78d8SArchit Taneja  *
2288c76b78d8SArchit Taneja  * n = Number of codewords in the page
2289c76b78d8SArchit Taneja  * . = ECC bytes
2290c76b78d8SArchit Taneja  * * = Spare/free bytes
2291c76b78d8SArchit Taneja  * x = Unused byte(s)
2292c76b78d8SArchit Taneja  * y = Reserved byte(s)
2293c76b78d8SArchit Taneja  *
2294c76b78d8SArchit Taneja  * 2K page: n = 4, spare = 16 bytes
2295c76b78d8SArchit Taneja  * 4K page: n = 8, spare = 32 bytes
2296c76b78d8SArchit Taneja  * 8K page: n = 16, spare = 64 bytes
2297c76b78d8SArchit Taneja  *
2298c76b78d8SArchit Taneja  * the qcom nand controller operates at a sub page/codeword level. each
2299c76b78d8SArchit Taneja  * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2300c76b78d8SArchit Taneja  * the number of ECC bytes vary based on the ECC strength and the bus width.
2301c76b78d8SArchit Taneja  *
2302c76b78d8SArchit Taneja  * the first n - 1 codewords contains 516 bytes of user data, the remaining
2303c76b78d8SArchit Taneja  * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2304c76b78d8SArchit Taneja  * both user data and spare(oobavail) bytes that sum up to 516 bytes.
2305c76b78d8SArchit Taneja  *
2306c76b78d8SArchit Taneja  * When we access a page with ECC enabled, the reserved bytes(s) are not
2307c76b78d8SArchit Taneja  * accessible at all. When reading, we fill up these unreadable positions
2308c76b78d8SArchit Taneja  * with 0xffs. When writing, the controller skips writing the inaccessible
2309c76b78d8SArchit Taneja  * bytes.
2310c76b78d8SArchit Taneja  *
2311c76b78d8SArchit Taneja  * Layout with ECC disabled:
2312c76b78d8SArchit Taneja  *
2313c76b78d8SArchit Taneja  * |------------------------------|  |---------------------------------------|
2314c76b78d8SArchit Taneja  * |         yy          xx.......|  |         bb          *********xx.......|
2315c76b78d8SArchit Taneja  * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2316c76b78d8SArchit Taneja  * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2317c76b78d8SArchit Taneja  * |         yy          xx.......|  |         bb          *********xx.......|
2318c76b78d8SArchit Taneja  * |------------------------------|  |---------------------------------------|
2319c76b78d8SArchit Taneja  *         codeword 1,2..n-1                        codeword n
2320c76b78d8SArchit Taneja  *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2321c76b78d8SArchit Taneja  *
2322c76b78d8SArchit Taneja  * n = Number of codewords in the page
2323c76b78d8SArchit Taneja  * . = ECC bytes
2324c76b78d8SArchit Taneja  * * = Spare/free bytes
2325c76b78d8SArchit Taneja  * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
2327c76b78d8SArchit Taneja  * b = Real Bad Block byte(s)
2328c76b78d8SArchit Taneja  * size1/size2 = function of codeword size and 'n'
2329c76b78d8SArchit Taneja  *
2330c76b78d8SArchit Taneja  * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2331c76b78d8SArchit Taneja  * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2332c76b78d8SArchit Taneja  * Block Markers. In the last codeword, this position contains the real BBM
2333c76b78d8SArchit Taneja  *
2334c76b78d8SArchit Taneja  * In order to have a consistent layout between RAW and ECC modes, we assume
2335c76b78d8SArchit Taneja  * the following OOB layout arrangement:
2336c76b78d8SArchit Taneja  *
2337c76b78d8SArchit Taneja  * |-----------|  |--------------------|
2338c76b78d8SArchit Taneja  * |yyxx.......|  |bb*********xx.......|
2339c76b78d8SArchit Taneja  * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2340c76b78d8SArchit Taneja  * |yyxx.......|  |bb*********xx.......|
2341c76b78d8SArchit Taneja  * |yyxx.......|  |bb*********xx.......|
2342c76b78d8SArchit Taneja  * |-----------|  |--------------------|
2343c76b78d8SArchit Taneja  *  first n - 1       nth OOB region
2344c76b78d8SArchit Taneja  *  OOB regions
2345c76b78d8SArchit Taneja  *
2346c76b78d8SArchit Taneja  * n = Number of codewords in the page
2347c76b78d8SArchit Taneja  * . = ECC bytes
2348c76b78d8SArchit Taneja  * * = FREE OOB bytes
2349c76b78d8SArchit Taneja  * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2350c76b78d8SArchit Taneja  * x = Unused byte(s)
2351c76b78d8SArchit Taneja  * b = Real bad block byte(s) (inaccessible when ECC enabled)
2352c76b78d8SArchit Taneja  *
2353c76b78d8SArchit Taneja  * This layout is read as is when ECC is disabled. When ECC is enabled, the
2354c76b78d8SArchit Taneja  * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2355c76b78d8SArchit Taneja  * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2356421e81c4SBoris Brezillon  * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2357421e81c4SBoris Brezillon  * the sum of the three).
2358c76b78d8SArchit Taneja  */
2359421e81c4SBoris Brezillon static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2360421e81c4SBoris Brezillon 				   struct mtd_oob_region *oobregion)
2361c76b78d8SArchit Taneja {
2362421e81c4SBoris Brezillon 	struct nand_chip *chip = mtd_to_nand(mtd);
2363421e81c4SBoris Brezillon 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2364c76b78d8SArchit Taneja 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2365c76b78d8SArchit Taneja 
2366421e81c4SBoris Brezillon 	if (section > 1)
2367421e81c4SBoris Brezillon 		return -ERANGE;
2368c76b78d8SArchit Taneja 
2369421e81c4SBoris Brezillon 	if (!section) {
2370421e81c4SBoris Brezillon 		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2371421e81c4SBoris Brezillon 				    host->bbm_size;
2372421e81c4SBoris Brezillon 		oobregion->offset = 0;
2373421e81c4SBoris Brezillon 	} else {
2374421e81c4SBoris Brezillon 		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2375421e81c4SBoris Brezillon 		oobregion->offset = mtd->oobsize - oobregion->length;
2376c76b78d8SArchit Taneja 	}
2377c76b78d8SArchit Taneja 
2378421e81c4SBoris Brezillon 	return 0;
2379c76b78d8SArchit Taneja }
2380c76b78d8SArchit Taneja 
2381421e81c4SBoris Brezillon static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2382421e81c4SBoris Brezillon 				     struct mtd_oob_region *oobregion)
2383421e81c4SBoris Brezillon {
2384421e81c4SBoris Brezillon 	struct nand_chip *chip = mtd_to_nand(mtd);
2385421e81c4SBoris Brezillon 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2386421e81c4SBoris Brezillon 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2387421e81c4SBoris Brezillon 
2388421e81c4SBoris Brezillon 	if (section)
2389421e81c4SBoris Brezillon 		return -ERANGE;
2390421e81c4SBoris Brezillon 
2391421e81c4SBoris Brezillon 	oobregion->length = ecc->steps * 4;
2392421e81c4SBoris Brezillon 	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2393421e81c4SBoris Brezillon 
2394421e81c4SBoris Brezillon 	return 0;
2395421e81c4SBoris Brezillon }
2396421e81c4SBoris Brezillon 
/* OOB layout callbacks handed to the MTD core (layout described above) */
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
2401421e81c4SBoris Brezillon 
/*
 * ECC bytes consumed per step for a given strength: 4-bit correction
 * needs 12 bytes, anything stronger (8-bit here) needs 16.
 */
static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	if (strength == 4)
		return 12;

	return 16;
}
/*
 * Controller ECC capabilities: a single fixed step size (NANDC_STEP_SIZE)
 * with selectable 4- or 8-bit strength; the per-step ECC byte count is
 * computed by qcom_nandc_calc_ecc_bytes().
 */
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);
24097ddb937fSAbhishek Sahu 
/*
 * Per-chip setup, run after nand_scan_ident(): choose an ECC configuration
 * that fits the detected page/OOB geometry and derive all the controller
 * register values (cfg0/cfg1, raw variants, BCH config) from it.
 *
 * Returns 0 on success, or the error from nand_ecc_choose_conf() when no
 * supported ECC configuration fits the chip's OOB size.
 */
static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* controller only supports 512 bytes data steps */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * Each CW has 4 available OOB bytes which will be protected with ECC
	 * so remaining bytes can be used for ECC.
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		/*
		 * the split of the per-step non-data bytes between parity,
		 * spare and BBM depends on the bus width (total stays 16)
		 */
		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the controller
		 * uses lesser bytes for ECC. If RS is used, the ECC bytes is
		 * always 10 bytes
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page		= qcom_nandc_read_page;
	ecc->read_page_raw	= qcom_nandc_read_page_raw;
	ecc->read_oob		= qcom_nandc_read_oob;
	ecc->write_page		= qcom_nandc_write_page;
	ecc->write_page_raw	= qcom_nandc_write_page_raw;
	ecc->write_oob		= qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	/*
	 * remember the largest codeword count seen across all attached
	 * chips; the final BAM transaction is sized from this
	 */
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command protects
	 * spare data with ECC too. We protect spare data by default, so we set
	 * it to main + spare data, which are 512 and 4 bytes respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;

	/* byte offset (1-based) of the BBM within the last codeword */
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	/* ECC-enabled read/write configuration */
	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 <<  CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	/* raw (ECC-disabled) configuration: the whole codeword is user data */
	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	/* pre-cook the erased-codeword-detect toggle values as LE32 */
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
2574c76b78d8SArchit Taneja 
/*
 * Allocate the controller's buffers and DMA resources (register shadow,
 * bounce buffer, DMA channels, and for BAM an initial transaction).
 *
 * On failure the caller (probe) invokes qcom_nandc_unalloc() to release
 * whatever was acquired, so no unwinding is done here.
 */
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	/*
	 * NOTE(review): only the coherent mask is set here; the streaming
	 * mask used by dma_map_single() below is presumably inherited from
	 * the platform defaults — confirm
	 */
	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and preforming read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	/* shadow of the controller registers, built up per operation */
	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
					GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	/* landing buffer for register read-back DMA descriptors */
	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
				MAX_REG_RD, sizeof(*nandc->reg_read_buf),
				GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		/* BAM reads registers through DMA; map the buffer once */
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		/* BAM mode uses three dedicated channels: tx, rx and cmd */
		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM tranasction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		/* ADM mode shares a single rx/tx channel */
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_hw_control_init(&nandc->controller);

	return 0;
}
2667c76b78d8SArchit Taneja 
/*
 * Release the DMA resources acquired by qcom_nandc_alloc(). Safe to call
 * on a partially-initialized controller: each channel pointer is checked
 * before release (devm handles the buffer allocations).
 */
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		/*
		 * NOTE(review): if alloc failed before the mapping,
		 * reg_read_dma is still 0 from kzalloc; relying on
		 * dma_mapping_error() to reject 0 may not hold on every
		 * platform — confirm
		 */
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
2690c76b78d8SArchit Taneja 
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	/* program the command-valid register with the driver's default */
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		/* read-modify-write: only set the BAM enable bit */
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
2715c76b78d8SArchit Taneja 
2716c76b78d8SArchit Taneja static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2717c76b78d8SArchit Taneja 			       struct qcom_nand_host *host,
2718c76b78d8SArchit Taneja 			       struct device_node *dn)
2719c76b78d8SArchit Taneja {
2720c76b78d8SArchit Taneja 	struct nand_chip *chip = &host->chip;
2721c76b78d8SArchit Taneja 	struct mtd_info *mtd = nand_to_mtd(chip);
2722c76b78d8SArchit Taneja 	struct device *dev = nandc->dev;
2723c76b78d8SArchit Taneja 	int ret;
2724c76b78d8SArchit Taneja 
2725c76b78d8SArchit Taneja 	ret = of_property_read_u32(dn, "reg", &host->cs);
2726c76b78d8SArchit Taneja 	if (ret) {
2727c76b78d8SArchit Taneja 		dev_err(dev, "can't get chip-select\n");
2728c76b78d8SArchit Taneja 		return -ENXIO;
2729c76b78d8SArchit Taneja 	}
2730c76b78d8SArchit Taneja 
2731c76b78d8SArchit Taneja 	nand_set_flash_node(chip, dn);
2732c76b78d8SArchit Taneja 	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2733069f0534SFabio Estevam 	if (!mtd->name)
2734069f0534SFabio Estevam 		return -ENOMEM;
2735069f0534SFabio Estevam 
2736c76b78d8SArchit Taneja 	mtd->owner = THIS_MODULE;
2737c76b78d8SArchit Taneja 	mtd->dev.parent = dev;
2738c76b78d8SArchit Taneja 
2739c76b78d8SArchit Taneja 	chip->cmdfunc		= qcom_nandc_command;
2740c76b78d8SArchit Taneja 	chip->select_chip	= qcom_nandc_select_chip;
2741c76b78d8SArchit Taneja 	chip->read_byte		= qcom_nandc_read_byte;
2742c76b78d8SArchit Taneja 	chip->read_buf		= qcom_nandc_read_buf;
2743c76b78d8SArchit Taneja 	chip->write_buf		= qcom_nandc_write_buf;
2744b958758eSMiquel Raynal 	chip->set_features	= nand_get_set_features_notsupp;
2745b958758eSMiquel Raynal 	chip->get_features	= nand_get_set_features_notsupp;
2746c76b78d8SArchit Taneja 
2747c76b78d8SArchit Taneja 	/*
2748c76b78d8SArchit Taneja 	 * the bad block marker is readable only when we read the last codeword
2749c76b78d8SArchit Taneja 	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2750c76b78d8SArchit Taneja 	 * helpers don't allow us to read BB from a nand chip with ECC
2751c76b78d8SArchit Taneja 	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2752c76b78d8SArchit Taneja 	 * and block_markbad helpers until we permanently switch to using
2753c76b78d8SArchit Taneja 	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2754c76b78d8SArchit Taneja 	 */
2755c76b78d8SArchit Taneja 	chip->block_bad		= qcom_nandc_block_bad;
2756c76b78d8SArchit Taneja 	chip->block_markbad	= qcom_nandc_block_markbad;
2757c76b78d8SArchit Taneja 
2758c76b78d8SArchit Taneja 	chip->controller = &nandc->controller;
2759c76b78d8SArchit Taneja 	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2760c76b78d8SArchit Taneja 			 NAND_SKIP_BBTSCAN;
2761c76b78d8SArchit Taneja 
2762c76b78d8SArchit Taneja 	/* set up initial status value */
2763c76b78d8SArchit Taneja 	host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2764c76b78d8SArchit Taneja 
2765c76b78d8SArchit Taneja 	ret = nand_scan_ident(mtd, 1, NULL);
2766c76b78d8SArchit Taneja 	if (ret)
2767c76b78d8SArchit Taneja 		return ret;
2768c76b78d8SArchit Taneja 
2769c76b78d8SArchit Taneja 	ret = qcom_nand_host_setup(host);
277089f5127cSAbhishek Sahu 
2771c76b78d8SArchit Taneja 	return ret;
277289f5127cSAbhishek Sahu }
277389f5127cSAbhishek Sahu 
277489f5127cSAbhishek Sahu static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
277589f5127cSAbhishek Sahu 				  struct qcom_nand_host *host,
277689f5127cSAbhishek Sahu 				  struct device_node *dn)
277789f5127cSAbhishek Sahu {
277889f5127cSAbhishek Sahu 	struct nand_chip *chip = &host->chip;
277989f5127cSAbhishek Sahu 	struct mtd_info *mtd = nand_to_mtd(chip);
278089f5127cSAbhishek Sahu 	int ret;
2781c76b78d8SArchit Taneja 
2782c76b78d8SArchit Taneja 	ret = nand_scan_tail(mtd);
2783c76b78d8SArchit Taneja 	if (ret)
2784c76b78d8SArchit Taneja 		return ret;
2785c76b78d8SArchit Taneja 
278689f5127cSAbhishek Sahu 	ret = mtd_device_register(mtd, NULL, 0);
278789f5127cSAbhishek Sahu 	if (ret)
278889f5127cSAbhishek Sahu 		nand_cleanup(mtd_to_nand(mtd));
278989f5127cSAbhishek Sahu 
279089f5127cSAbhishek Sahu 	return ret;
279189f5127cSAbhishek Sahu }
279289f5127cSAbhishek Sahu 
/*
 * Probe every chip described as a child node of the controller: first
 * identify them all (so the worst-case codeword count is known), then
 * size the final BAM transaction, then register each surviving host.
 *
 * Returns 0 if at least one chip was registered, -ENODEV if none were,
 * or -ENOMEM on allocation failure.
 */
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host, *tmp;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			/* drop the reference held by the iterator */
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init(nandc, host, child);
		if (ret) {
			/* skip this chip but keep probing the others */
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	if (nandc->props->is_bam) {
		/*
		 * replace the minimal probe-time BAM transaction with one
		 * sized for the largest codeword count seen during setup
		 */
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
		/*
		 * NOTE(review): 'child' is NULL here (the iterator above ran
		 * to completion). Harmless today because the dn parameter of
		 * qcom_nand_mtd_register() is unused, but worth confirming
		 * before that function ever starts using it.
		 */
		ret = qcom_nand_mtd_register(nandc, host, child);
		if (ret) {
			list_del(&host->node);
			devm_kfree(dev, host);
		}
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}
2842c76b78d8SArchit Taneja 
2843c76b78d8SArchit Taneja /* parse custom DT properties here */
2844c76b78d8SArchit Taneja static int qcom_nandc_parse_dt(struct platform_device *pdev)
2845c76b78d8SArchit Taneja {
2846c76b78d8SArchit Taneja 	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2847c76b78d8SArchit Taneja 	struct device_node *np = nandc->dev->of_node;
2848c76b78d8SArchit Taneja 	int ret;
2849c76b78d8SArchit Taneja 
2850497d7d85SAbhishek Sahu 	if (!nandc->props->is_bam) {
2851497d7d85SAbhishek Sahu 		ret = of_property_read_u32(np, "qcom,cmd-crci",
2852497d7d85SAbhishek Sahu 					   &nandc->cmd_crci);
2853c76b78d8SArchit Taneja 		if (ret) {
2854c76b78d8SArchit Taneja 			dev_err(nandc->dev, "command CRCI unspecified\n");
2855c76b78d8SArchit Taneja 			return ret;
2856c76b78d8SArchit Taneja 		}
2857c76b78d8SArchit Taneja 
2858497d7d85SAbhishek Sahu 		ret = of_property_read_u32(np, "qcom,data-crci",
2859497d7d85SAbhishek Sahu 					   &nandc->data_crci);
2860c76b78d8SArchit Taneja 		if (ret) {
2861c76b78d8SArchit Taneja 			dev_err(nandc->dev, "data CRCI unspecified\n");
2862c76b78d8SArchit Taneja 			return ret;
2863c76b78d8SArchit Taneja 		}
2864497d7d85SAbhishek Sahu 	}
2865c76b78d8SArchit Taneja 
2866c76b78d8SArchit Taneja 	return 0;
2867c76b78d8SArchit Taneja }
2868c76b78d8SArchit Taneja 
/*
 * Platform driver probe: map registers, acquire clocks, allocate
 * DMA/buffers, perform one-time controller setup, then probe the
 * attached chips. Resources acquired with devm_* need no manual unwind;
 * the goto chain tears down clocks and DMA in reverse order.
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* per-variant properties selected by the compatible string */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	/*
	 * NOTE(review): phys_to_dma() is not meant for driver use (see the
	 * include comment at the top of the file) — this derives the DMA
	 * address for command-descriptor targets and should eventually be
	 * replaced with a proper DMA API mapping
	 */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	/* on failure, unalloc releases whatever alloc managed to acquire */
	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
2943c76b78d8SArchit Taneja 
/*
 * Platform driver remove: release every registered chip, then tear down
 * DMA resources and clocks in reverse of probe order.
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	/* unregisters the MTD device and frees the nand_chip internals */
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
2959c76b78d8SArchit Taneja 
296058f1f22aSAbhishek Sahu static const struct qcom_nandc_props ipq806x_nandc_props = {
296158f1f22aSAbhishek Sahu 	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
29628c5d5d6aSAbhishek Sahu 	.is_bam = false,
2963cc409b9aSAbhishek Sahu 	.dev_cmd_reg_start = 0x0,
296458f1f22aSAbhishek Sahu };
2965c76b78d8SArchit Taneja 
2966a0637834SAbhishek Sahu static const struct qcom_nandc_props ipq4019_nandc_props = {
2967a0637834SAbhishek Sahu 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
2968a0637834SAbhishek Sahu 	.is_bam = true,
2969a0637834SAbhishek Sahu 	.dev_cmd_reg_start = 0x0,
2970a0637834SAbhishek Sahu };
2971a0637834SAbhishek Sahu 
2972dce84760SAbhishek Sahu static const struct qcom_nandc_props ipq8074_nandc_props = {
2973dce84760SAbhishek Sahu 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
2974dce84760SAbhishek Sahu 	.is_bam = true,
2975dce84760SAbhishek Sahu 	.dev_cmd_reg_start = 0x7000,
2976dce84760SAbhishek Sahu };
2977dce84760SAbhishek Sahu 
2978c76b78d8SArchit Taneja /*
2979c76b78d8SArchit Taneja  * data will hold a struct pointer containing more differences once we support
2980c76b78d8SArchit Taneja  * more controller variants
2981c76b78d8SArchit Taneja  */
2982c76b78d8SArchit Taneja static const struct of_device_id qcom_nandc_of_match[] = {
298358f1f22aSAbhishek Sahu 	{
298458f1f22aSAbhishek Sahu 		.compatible = "qcom,ipq806x-nand",
298558f1f22aSAbhishek Sahu 		.data = &ipq806x_nandc_props,
2986c76b78d8SArchit Taneja 	},
2987a0637834SAbhishek Sahu 	{
2988a0637834SAbhishek Sahu 		.compatible = "qcom,ipq4019-nand",
2989a0637834SAbhishek Sahu 		.data = &ipq4019_nandc_props,
2990a0637834SAbhishek Sahu 	},
2991dce84760SAbhishek Sahu 	{
2992dce84760SAbhishek Sahu 		.compatible = "qcom,ipq8074-nand",
2993dce84760SAbhishek Sahu 		.data = &ipq8074_nandc_props,
2994dce84760SAbhishek Sahu 	},
2995c76b78d8SArchit Taneja 	{}
2996c76b78d8SArchit Taneja };
2997c76b78d8SArchit Taneja MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2998c76b78d8SArchit Taneja 
2999c76b78d8SArchit Taneja static struct platform_driver qcom_nandc_driver = {
3000c76b78d8SArchit Taneja 	.driver = {
3001c76b78d8SArchit Taneja 		.name = "qcom-nandc",
3002c76b78d8SArchit Taneja 		.of_match_table = qcom_nandc_of_match,
3003c76b78d8SArchit Taneja 	},
3004c76b78d8SArchit Taneja 	.probe   = qcom_nandc_probe,
3005c76b78d8SArchit Taneja 	.remove  = qcom_nandc_remove,
3006c76b78d8SArchit Taneja };
3007c76b78d8SArchit Taneja module_platform_driver(qcom_nandc_driver);
3008c76b78d8SArchit Taneja 
3009c76b78d8SArchit Taneja MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3010c76b78d8SArchit Taneja MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3011c76b78d8SArchit Taneja MODULE_LICENSE("GPL v2");
3012