// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Authors:
 *	Md Sadre Alam <quic_mdalam@quicinc.com>
 *	Sricharan R <quic_srichara@quicinc.com>
 *	Varadarajan Narayanan <quic_varada@quicinc.com>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/nand-qpic-common.h>
#include <linux/mtd/spinand.h>
#include <linux/bitfield.h>

#define NAND_FLASH_SPI_CFG		0xc0
#define NAND_NUM_ADDR_CYCLES		0xc4
#define NAND_BUSY_CHECK_WAIT_CNT	0xc8
#define NAND_FLASH_FEATURES		0xf64

/* QSPI NAND config reg bits */
#define LOAD_CLK_CNTR_INIT_EN		BIT(28)
#define CLK_CNTR_INIT_VAL_VEC		0x924
#define CLK_CNTR_INIT_VAL_VEC_MASK	GENMASK(27, 16)
#define FEA_STATUS_DEV_ADDR		0xc0
#define FEA_STATUS_DEV_ADDR_MASK	GENMASK(15, 8)
#define SPI_CFG				BIT(0)
#define SPI_NUM_ADDR			0xDA4DB
#define SPI_WAIT_CNT			0x10
#define QPIC_QSPI_NUM_CS		1
#define SPI_TRANSFER_MODE_x1		BIT(29)
#define SPI_TRANSFER_MODE_x4		(3 << 29)
#define SPI_WP				BIT(28)
#define SPI_HOLD			BIT(27)
#define QPIC_SET_FEATURE		BIT(31)

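/* SPI NAND opcodes, translated to QPIC command values by qcom_spi_cmd_mapping() */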
#define SPINAND_RESET			0xff
#define SPINAND_READID			0x9f
#define SPINAND_GET_FEATURE		0x0f
#define SPINAND_SET_FEATURE		0x1f
#define SPINAND_READ			0x13
#define SPINAND_ERASE			0xd8
#define SPINAND_WRITE_EN		0x06
#define SPINAND_PROGRAM_EXECUTE		0x10
#define SPINAND_PROGRAM_LOAD		0x84

#define ACC_FEATURE			0xe
#define BAD_BLOCK_MARKER_SIZE		0x2
#define OOB_BUF_SIZE			128
#define ecceng_to_qspi(eng)		container_of(eng, struct qpic_spi_nand, ecc_eng)

struct snandc_read_status {
	__le32 snandc_flash;
	__le32 snandc_buffer;
	__le32 snandc_erased_cw;
};

/**
 * struct qcom_ecc_stats - ECC statistics for the current page read
 * @corrected: sum of the per-codeword corrected-error counts
 * @bitflips: maximum number of bitflips seen in any single codeword
 * @failed: number of codewords with uncorrectable errors
 */
struct qcom_ecc_stats {
	u32 corrected;
	u32 bitflips;
	u32 failed;
};

struct qpic_ecc {
	struct device *dev;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	int ecc_mode;
	int bytes;
	int steps;
	int step_size;
	int strength;
	int cw_size;
	int cw_data;
	u32 cfg0;
	u32 cfg1;
	u32 cfg0_raw;
	u32 cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
	bool bch_enabled;
};

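/*
 * Per-controller QSPI NAND state. @addr1/@addr2 and @cmd hold the
 * pre-packed NAND_ADDR0/NAND_ADDR1 and NAND_FLASH_CMD register values
 * captured in qcom_spi_send_cmdaddr(); @page_rw, @oob_rw and @raw_rw
 * record the kind of I/O request prepared by the ECC engine hooks and
 * select the read/program path taken when the data phase executes.
 */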
struct qpic_spi_nand {
	struct qcom_nand_controller *snandc;
	struct spi_controller *ctlr;
	struct mtd_info *mtd;
	struct clk *iomacro_clk;
	struct qpic_ecc *ecc;
	struct qcom_ecc_stats ecc_stats;
	struct nand_ecc_engine ecc_eng;
	u8 *data_buf;
	u8 *oob_buf;
	__le32 addr1;
	__le32 addr2;
	__le32 cmd;
	u32 num_cw;
	bool oob_rw;
	bool page_rw;
	bool raw_rw;
};

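/*
 * The controller reads each codeword into an internal buffer; the
 * READ_LOCATION registers describe up to four (offset, size) regions of
 * that buffer to copy out over DMA. Regular codewords are programmed via
 * NAND_READ_LOCATION_n, the last codeword of a page via the separate
 * NAND_READ_LOCATION_LAST_CW_n bank.
 */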
static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
					int reg, int cw_offset, int read_size,
					int is_last_read_loc)
{
	__le32 locreg_val;
	u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
		  FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
		  FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);

	locreg_val = cpu_to_le32(val);

	if (reg == NAND_READ_LOCATION_0)
		snandc->regs->read_location0 = locreg_val;
	else if (reg == NAND_READ_LOCATION_1)
		snandc->regs->read_location1 = locreg_val;
	else if (reg == NAND_READ_LOCATION_2)
		snandc->regs->read_location2 = locreg_val;
	else if (reg == NAND_READ_LOCATION_3)
		snandc->regs->read_location3 = locreg_val;
}

static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
				       int reg, int cw_offset, int read_size,
				       int is_last_read_loc)
{
	__le32 locreg_val;
	u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
		  FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
		  FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);

	locreg_val = cpu_to_le32(val);

	if (reg == NAND_READ_LOCATION_LAST_CW_0)
		snandc->regs->read_location_last0 = locreg_val;
	else if (reg == NAND_READ_LOCATION_LAST_CW_1)
		snandc->regs->read_location_last1 = locreg_val;
	else if (reg == NAND_READ_LOCATION_LAST_CW_2)
		snandc->regs->read_location_last2 = locreg_val;
	else if (reg == NAND_READ_LOCATION_LAST_CW_3)
		snandc->regs->read_location_last3 = locreg_val;
}

static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;
	struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);

	return qspi->snandc;
}

static int qcom_spi_init(struct qcom_nand_controller *snandc)
{
	u32 snand_cfg_val = 0x0;
	int ret;

	snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
			FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
			FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
			FIELD_PREP(SPI_CFG, 0);

	snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
	snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
	snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);

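	/*
	 * NAND_FLASH_SPI_CFG is programmed twice, the second time with
	 * LOAD_CLK_CNTR_INIT_EN explicitly cleared; presumably this
	 * sequence latches the CLK_CNTR_INIT_VAL_VEC value into the
	 * controller.
	 */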
	qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);

	snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
	snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);

	qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);

	qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
			   NAND_BAM_NEXT_SGL);

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
		return ret;
	}

	return 0;
}

static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
	struct qpic_ecc *qecc = snandc->qspi->ecc;

	if (section > 1)
		return -ERANGE;

	oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
	struct qpic_ecc *qecc = snandc->qspi->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = qecc->steps * 4;
	oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;

	return 0;
}

static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
	.ecc = qcom_spi_ooblayout_ecc,
	.free = qcom_spi_ooblayout_free,
};

static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
{
	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
	struct nand_ecc_props *reqs = &nand->ecc.requirements;
	struct nand_ecc_props *user = &nand->ecc.user_conf;
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int cwperpage, bad_block_byte, ret;
	struct qpic_ecc *ecc_cfg;

	cwperpage = mtd->writesize / NANDC_STEP_SIZE;
	snandc->qspi->num_cw = cwperpage;

	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
	if (!ecc_cfg)
		return -ENOMEM;

	if (user->step_size && user->strength) {
		ecc_cfg->step_size = user->step_size;
		ecc_cfg->strength = user->strength;
	} else if (reqs->step_size && reqs->strength) {
		ecc_cfg->step_size = reqs->step_size;
		ecc_cfg->strength = reqs->strength;
	} else {
		/* use defaults */
		ecc_cfg->step_size = NANDC_STEP_SIZE;
		ecc_cfg->strength = 4;
	}

	if (ecc_cfg->step_size != NANDC_STEP_SIZE) {
		dev_err(snandc->dev,
			"only %u bytes ECC step size is supported\n",
			NANDC_STEP_SIZE);
		ret = -EOPNOTSUPP;
		goto err_free_ecc_cfg;
	}

	switch (ecc_cfg->strength) {
	case 4:
		ecc_cfg->ecc_mode = ECC_MODE_4BIT;
		ecc_cfg->ecc_bytes_hw = 7;
		ecc_cfg->spare_bytes = 4;
		break;

	case 8:
		ecc_cfg->ecc_mode = ECC_MODE_8BIT;
		ecc_cfg->ecc_bytes_hw = 13;
		ecc_cfg->spare_bytes = 2;
		break;

	default:
		dev_err(snandc->dev,
			"only 4 or 8 bits ECC strength is supported\n");
		ret = -EOPNOTSUPP;
		goto err_free_ecc_cfg;
	}

	snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize,
					GFP_KERNEL);
	if (!snandc->qspi->oob_buf) {
		ret = -ENOMEM;
		goto err_free_ecc_cfg;
	}

	memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);

	nand->ecc.ctx.priv = ecc_cfg;
	snandc->qspi->mtd = mtd;

	ecc_cfg->bbm_size = 1;
	ecc_cfg->bch_enabled = true;
	ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;

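	/*
	 * Codeword geometry: each codeword carries 516 bytes of user data,
	 * plus BBM, spare and ECC parity bytes (528 bytes total for 4-bit
	 * ECC, 532 for 8-bit). The bad block byte position programmed into
	 * the controller is relative to the start of the last codeword and
	 * counts from 1, hence the +1 below.
	 */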
	ecc_cfg->steps = cwperpage;
	ecc_cfg->cw_data = 516;
	ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
	bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;

	mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);

	/*
	 * Free the temporary BAM transaction allocated initially by
	 * qcom_nandc_alloc(), and allocate a new one based on the
	 * updated max_cwperpage value.
	 */
	qcom_free_bam_transaction(snandc);

	snandc->max_cwperpage = cwperpage;

	snandc->bam_txn = qcom_alloc_bam_transaction(snandc);
	if (!snandc->bam_txn) {
		dev_err(snandc->dev, "failed to allocate BAM transaction\n");
		ret = -ENOMEM;
		goto err_free_ecc_cfg;
	}

	ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
			FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
			FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
			FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
			FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
			FIELD_PREP(STATUS_BFR_READ, 0) |
			FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
			FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);

	ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
			FIELD_PREP(CS_ACTIVE_BSY, 0) |
			FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
			FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
			FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
			FIELD_PREP(WIDE_FLASH, 0) |
			FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);

	ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
			    FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
			    FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
			    FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);

	ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
			    FIELD_PREP(CS_ACTIVE_BSY, 0) |
			    FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
			    FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
			    FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
			    FIELD_PREP(WIDE_FLASH, 0) |
			    FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);

	ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
			       FIELD_PREP(ECC_SW_RESET, 0) |
			       FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
			       FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
			       FIELD_PREP(ECC_MODE_MASK, ecc_cfg->ecc_mode) |
			       FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);

	ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
	ecc_cfg->clrflashstatus = FS_READY_BSY_N;
	ecc_cfg->clrreadstatus = 0xc0;

	conf->step_size = ecc_cfg->step_size;
	conf->strength = ecc_cfg->strength;

	snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
	snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
		ecc_cfg->strength, ecc_cfg->step_size);

	return 0;

err_free_ecc_cfg:
	kfree(ecc_cfg);
	return ret;
}

static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
{
	struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);

	kfree(ecc_cfg);
}

static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
						 struct nand_page_io_req *req)
{
	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
	struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);

	snandc->qspi->ecc = ecc_cfg;
	snandc->qspi->raw_rw = false;
	snandc->qspi->oob_rw = false;
	snandc->qspi->page_rw = false;

	if (req->datalen)
		snandc->qspi->page_rw = true;

	if (req->ooblen)
		snandc->qspi->oob_rw = true;

	if (req->mode == MTD_OPS_RAW)
		snandc->qspi->raw_rw = true;

	return 0;
}

static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
						struct nand_page_io_req *req)
{
	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
		return 0;

	if (snandc->qspi->ecc_stats.failed)
		mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
	else
		mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;

	if (snandc->qspi->ecc_stats.failed)
		return -EBADMSG;
	else
		return snandc->qspi->ecc_stats.bitflips;
}

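/*
 * Glue for the generic NAND ECC framework: the QPIC engine is exposed as
 * a pipelined on-host ECC engine, so the SPI NAND core invokes these
 * hooks around every page I/O instead of doing ECC in software.
 */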
static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
	.init_ctx = qcom_spi_ecc_init_ctx_pipelined,
	.cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
	.prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
	.finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
};

/*
 * Helper to configure the READ_LOCATION register values for codeword @cw.
 * The last codeword of a page is programmed through the separate
 * NAND_READ_LOCATION_LAST_CW_n register bank, all other codewords through
 * NAND_READ_LOCATION_n.
 */
static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
				  int cw_offset, int read_size, int is_last_read_loc)
{
	int reg_base = NAND_READ_LOCATION_0;
	int num_cw = snandc->qspi->num_cw;

	if (cw == (num_cw - 1))
		reg_base = NAND_READ_LOCATION_LAST_CW_0;

	reg_base += reg * 4;

	if (cw == (num_cw - 1))
		return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
						  read_size, is_last_read_loc);
	else
		return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
						   read_size, is_last_read_loc);
}

static void
qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
{
	__le32 *reg = &snandc->regs->read_location0;
	int num_cw = snandc->qspi->num_cw;

	qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
	if (cw == (num_cw - 1)) {
		reg = &snandc->regs->read_location_last0;
		qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
				   NAND_BAM_NEXT_SGL);
	}

	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
	qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			  NAND_BAM_NEXT_SGL);
}

static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	int ret;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);

	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->addr0 = snandc->qspi->addr1;
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cfg0 = cpu_to_le32((ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
					 FIELD_PREP(CW_PER_PAGE_MASK, 0));
	snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to erase block\n");
		return ret;
	}

	return 0;
}

static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
						bool use_ecc, int cw)
{
	__le32 *reg = &snandc->regs->read_location0;
	int num_cw = snandc->qspi->num_cw;

	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
			   NAND_ERASED_CW_DETECT_CFG, 1,
			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);

	if (cw == (num_cw - 1)) {
		reg = &snandc->regs->read_location_last0;
		qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
	}
	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
}

static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
{
	int i;

	qcom_nandc_dev_to_mem(snandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}

static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
				 const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	struct mtd_info *mtd = snandc->qspi->mtd;
	int size, ret = 0;
	int col, bbpos;
	u32 cfg0, cfg1, ecc_bch_cfg;
	u32 num_cw = snandc->qspi->num_cw;

	qcom_clear_bam_transaction(snandc);
	qcom_clear_read_regs(snandc);

	size = ecc_cfg->cw_size;
	col = ecc_cfg->cw_size * (num_cw - 1);

	memset(snandc->data_buffer, 0xff, size);
	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
	snandc->regs->addr1 = snandc->qspi->addr2;

	cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, 0);
	cfg1 = ecc_cfg->cfg1_raw;
	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;

	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);

	qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);

	qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failed to read last cw\n");
		return ret;
	}

	ret = qcom_spi_check_raw_flash_errors(snandc, 1);
	if (ret)
		return ret;

	bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);

	/*
	 * TODO: The SPINAND code expects two bad block marker bytes
	 * at the beginning of the OOB area, but the OOB layout used by
	 * the driver has only one. Duplicate that byte for now to avoid
	 * blocks being spuriously marked as bad.
	 *
	 * This can be removed once single-byte bad block marker support
	 * gets implemented in the SPINAND code.
	 */
	snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];

	memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);

	return ret;
}

static int qcom_spi_check_error(struct qcom_nand_controller *snandc)
{
	struct snandc_read_status *buf;
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	int i, num_cw = snandc->qspi->num_cw;
	bool flash_op_err = false, erased;
	unsigned int max_bitflips = 0;
	unsigned int uncorrectable_cws = 0;

	snandc->qspi->ecc_stats.failed = 0;
	snandc->qspi->ecc_stats.corrected = 0;

	qcom_nandc_dev_to_mem(snandc, true);
	buf = (struct snandc_read_status *)snandc->reg_read_buf;

	for (i = 0; i < num_cw; i++, buf++) {
		u32 flash, buffer, erased_cw;

		flash = le32_to_cpu(buf->snandc_flash);
		buffer = le32_to_cpu(buf->snandc_buffer);
		erased_cw = le32_to_cpu(buf->snandc_erased_cw);

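		/*
		 * A codeword flagged as uncorrectable may in fact just be
		 * an erased (all 0xff) page; with BCH enabled the hardware
		 * reports that case via the erased-CW detection status, so
		 * only count a real failure when the codeword was not
		 * erased.
		 */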
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			if (ecc_cfg->bch_enabled)
				erased = (erased_cw & ERASED_CW) == ERASED_CW;
			else
				erased = false;

			if (!erased)
				uncorrectable_cws |= BIT(i);

		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;

			/*
			 * The exact number of the corrected bits is
			 * unknown because the hardware only reports the
			 * number of the corrected bytes.
			 *
			 * Since we have no better solution at the moment,
			 * report that value as the number of bit errors
			 * despite that it is inaccurate in most cases.
			 */
			if (stat && stat != ecc_cfg->strength)
				dev_warn_once(snandc->dev,
					      "Warning: due to hw limitation, the reported number of the corrected bits may be inaccurate\n");

			snandc->qspi->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		snandc->qspi->ecc_stats.bitflips = max_bitflips;
	else
		snandc->qspi->ecc_stats.failed++;

	return 0;
}

static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
				u8 *oob_buf, int cw)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	struct mtd_info *mtd = snandc->qspi->mtd;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
	int raw_cw = cw;
	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
	int col;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);
	raw_cw = num_cw - 1;

	cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, 0);
	cfg1 = ecc_cfg->cfg1_raw;
	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;

	col = ecc_cfg->cw_size * cw;

	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);

	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);

	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
			   NAND_ERASED_CW_DETECT_CFG, 1,
			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);

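	/*
	 * A raw codeword is transferred in four chunks: the data bytes
	 * that precede the bad block marker position, the BBM byte(s),
	 * the remaining data bytes, and finally the spare + ECC parity
	 * bytes, so data and OOB land in their respective buffers.
	 */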
	data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
	oob_size1 = ecc_cfg->bbm_size;

	if (cw == (num_cw - 1)) {
		data_size2 = NANDC_STEP_SIZE - data_size1 -
			     ((num_cw - 1) * 4);
		oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
			    ecc_cfg->spare_bytes;
	} else {
		data_size2 = ecc_cfg->cw_data - data_size1;
		oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
	}

	qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
	read_loc += data_size1;

	qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
	read_loc += oob_size1;

	qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
	read_loc += data_size2;

	qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);

	qcom_spi_config_cw_read(snandc, false, raw_cw);

	qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return qcom_spi_check_raw_flash_errors(snandc, 1);
}

static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
				  const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	u8 *data_buf = NULL, *oob_buf = NULL;
	int ret, cw;
	u32 num_cw = snandc->qspi->num_cw;

	if (snandc->qspi->page_rw)
		data_buf = op->data.buf.in;

	oob_buf = snandc->qspi->oob_buf;
	memset(oob_buf, 0xff, OOB_BUF_SIZE);

	for (cw = 0; cw < num_cw; cw++) {
		ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
		if (ret)
			return ret;

		if (data_buf)
			data_buf += ecc_cfg->cw_data;
		if (oob_buf)
			oob_buf += ecc_cfg->bytes;
	}

	return 0;
}

static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
				  const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	u8 *data_buf = NULL, *oob_buf = NULL;
	int ret, i;
	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;

	data_buf = op->data.buf.in;
	oob_buf = snandc->qspi->oob_buf;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);

	cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
	cfg1 = ecc_cfg->cfg1;
	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;

	snandc->regs->addr0 = snandc->qspi->addr1;
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);

	qcom_clear_bam_transaction(snandc);

	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
			   NAND_ERASED_CW_DETECT_CFG, 1,
			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);

	for (i = 0; i < num_cw; i++) {
		int data_size, oob_size;

		if (i == (num_cw - 1)) {
			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
				   ecc_cfg->spare_bytes;
		} else {
			data_size = ecc_cfg->cw_data;
			oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
		}

		if (data_buf && oob_buf) {
			qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
			qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
		} else if (data_buf) {
			qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
		} else {
			qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
		}

		qcom_spi_config_cw_read(snandc, true, i);

		if (data_buf)
			qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
					   data_size, 0);
		if (oob_buf) {
			int j;

			for (j = 0; j < ecc_cfg->bbm_size; j++)
				*oob_buf++ = 0xff;

			qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
					   oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to read page\n");
		return ret;
	}

	return qcom_spi_check_error(snandc);
}

static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
				  const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	u8 *oob_buf = NULL;
	int ret, i;
	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;

	oob_buf = op->data.buf.in;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);

	cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
	cfg1 = ecc_cfg->cfg1;
	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;

	snandc->regs->addr0 = snandc->qspi->addr1;
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);

	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
			   NAND_ERASED_CW_DETECT_CFG, 1,
			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);

	for (i = 0; i < num_cw; i++) {
		int data_size, oob_size;

		if (i == (num_cw - 1)) {
			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
				   ecc_cfg->spare_bytes;
		} else {
			data_size = ecc_cfg->cw_data;
			oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
		}

		qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);

		qcom_spi_config_cw_read(snandc, true, i);

		if (oob_buf) {
			int j;

			for (j = 0; j < ecc_cfg->bbm_size; j++)
				*oob_buf++ = 0xff;

			qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
					   oob_buf, oob_size, 0);
		}

		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to read oob\n");
		return ret;
	}

	return qcom_spi_check_error(snandc);
}

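/*
 * Dispatch a completed READ command to the right data path based on the
 * request type recorded in qcom_spi_ecc_prepare_io_req_pipelined().
 */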
static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
			      const struct spi_mem_op *op)
{
	if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
		return qcom_spi_read_page_raw(snandc, op);

	if (snandc->qspi->page_rw)
		return qcom_spi_read_page_ecc(snandc, op);

	if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
		return qcom_spi_read_last_cw(snandc, op);

	if (snandc->qspi->oob_rw)
		return qcom_spi_read_page_oob(snandc, op);

	return 0;
}

static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
{
	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
			   1, NAND_BAM_NEXT_SGL);
}

static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
{
	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
			   NAND_BAM_NEXT_SGL);
}

static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
				const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	struct mtd_info *mtd = snandc->qspi->mtd;
	u8 *data_buf = NULL, *oob_buf = NULL;
	int i, ret;
	int num_cw = snandc->qspi->num_cw;
	u32 cfg0, cfg1, ecc_bch_cfg;

	cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
	cfg1 = ecc_cfg->cfg1_raw;
	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;

	data_buf = snandc->qspi->data_buf;

	oob_buf = snandc->qspi->oob_buf;
	memset(oob_buf, 0xff, OOB_BUF_SIZE);

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);

	snandc->regs->addr0 = snandc->qspi->addr1;
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_config_page_write(snandc);

	for (i = 0; i < num_cw; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
		oob_size1 = ecc_cfg->bbm_size;

		if (i == (num_cw - 1)) {
			data_size2 = NANDC_STEP_SIZE - data_size1 -
				     ((num_cw - 1) << 2);
			oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
				    ecc_cfg->spare_bytes;
		} else {
			data_size2 = ecc_cfg->cw_data - data_size1;
			oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
		}

		qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
				    NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
				    NAND_BAM_NO_EOT);
		oob_buf += oob_size1;
		reg_off += oob_size1;

		qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
				    NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		qcom_spi_config_cw_write(snandc);
	}

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to write raw page\n");
		return ret;
	}

	return 0;
}

static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
				const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	u8 *data_buf = NULL, *oob_buf = NULL;
	int i, ret;
	int num_cw = snandc->qspi->num_cw;
	u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;

	cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
	cfg1 = ecc_cfg->cfg1;
	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
	ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;

	if (snandc->qspi->data_buf)
		data_buf = snandc->qspi->data_buf;

	oob_buf = snandc->qspi->oob_buf;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);

	snandc->regs->addr0 = snandc->qspi->addr1;
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_config_page_write(snandc);

	for (i = 0; i < num_cw; i++) {
		int data_size, oob_size;

		if (i == (num_cw - 1)) {
			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
				   ecc_cfg->spare_bytes;
		} else {
			data_size = ecc_cfg->cw_data;
			oob_size = ecc_cfg->bytes;
		}

		if (data_buf)
			qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
					    i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);

		if (i == (num_cw - 1)) {
			if (oob_buf) {
				oob_buf += ecc_cfg->bbm_size;
				qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
						    oob_buf, oob_size, 0);
			}
		}

		qcom_spi_config_cw_write(snandc);

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to write page\n");
		return ret;
	}

	return 0;
}

static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
				const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	u8 *oob_buf = NULL;
	int ret, col, data_size, oob_size;
	int num_cw = snandc->qspi->num_cw;
	u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;

	cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
	       FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
	cfg1 = ecc_cfg->cfg1;
	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
	ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;

	col = ecc_cfg->cw_size * (num_cw - 1);

	oob_buf = snandc->qspi->data_buf;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);
	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
	snandc->regs->exec = cpu_to_le32(1);

	/* calculate the data and oob size for the last codeword/step */
	data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
	oob_size = snandc->qspi->mtd->oobavail;

	memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
				    oob_buf, 0, snandc->qspi->mtd->oobavail);
	qcom_spi_config_page_write(snandc);
	qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
	qcom_spi_config_cw_write(snandc);

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to write oob\n");
		return ret;
	}

	return 0;
}

static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
				    const struct spi_mem_op *op)
{
	if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
		return qcom_spi_program_raw(snandc, op);

	if (snandc->qspi->page_rw)
		return qcom_spi_program_ecc(snandc, op);

	if (snandc->qspi->oob_rw)
		return qcom_spi_program_oob(snandc, op);

	return 0;
}

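/*
 * Translate a SPI NAND opcode into the QPIC NAND_FLASH_CMD encoding.
 * WRITE_EN and PROGRAM_LOAD have no register-level equivalent here (the
 * controller sequences them itself as part of a page program), so they
 * are passed through unchanged and treated as no-ops by the callers.
 */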
static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
{
	switch (opcode) {
	case SPINAND_RESET:
		*cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
		break;
	case SPINAND_READID:
		*cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
		break;
	case SPINAND_GET_FEATURE:
		*cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
		break;
	case SPINAND_SET_FEATURE:
		*cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
			QPIC_SET_FEATURE);
		break;
	case SPINAND_READ:
		if (snandc->qspi->raw_rw) {
			*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
				SPI_WP | SPI_HOLD | OP_PAGE_READ);
		} else {
			*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
				SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
		}

		break;
	case SPINAND_ERASE:
		*cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
			SPI_HOLD | SPI_TRANSFER_MODE_x1;
		break;
	case SPINAND_WRITE_EN:
		*cmd = SPINAND_WRITE_EN;
		break;
	case SPINAND_PROGRAM_EXECUTE:
		*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
			SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
		break;
	case SPINAND_PROGRAM_LOAD:
		*cmd = SPINAND_PROGRAM_LOAD;
		break;
	default:
		dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
			       const struct spi_mem_op *op)
{
	int ret;
	u32 cmd;

	ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
	if (ret < 0)
		return ret;

	if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
		snandc->qspi->data_buf = (u8 *)op->data.buf.out;

	return 0;
}

static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
				 const struct spi_mem_op *op)
{
	u32 cmd;
	int ret, opcode;

	ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
	if (ret < 0)
		return ret;

	opcode = op->cmd.opcode;

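	/*
	 * For page-addressed ops the row address is packed into the
	 * ADDR0/ADDR1 register pair: ADDR0[31:16] carries the low 16 bits
	 * of the page address (the column in ADDR0[15:0] stays 0) and
	 * ADDR1 the remaining high bits.
	 */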
	switch (opcode) {
	case SPINAND_WRITE_EN:
		return 0;
	case SPINAND_PROGRAM_EXECUTE:
		snandc->qspi->addr1 = cpu_to_le32(op->addr.val << 16);
		snandc->qspi->addr2 = cpu_to_le32(op->addr.val >> 16 & 0xff);
		snandc->qspi->cmd = cpu_to_le32(cmd);
		return qcom_spi_program_execute(snandc, op);
	case SPINAND_READ:
		snandc->qspi->addr1 = cpu_to_le32(op->addr.val << 16);
		snandc->qspi->addr2 = cpu_to_le32(op->addr.val >> 16 & 0xff);
		snandc->qspi->cmd = cpu_to_le32(cmd);
		return 0;
	case SPINAND_ERASE:
		snandc->qspi->addr1 = cpu_to_le32(op->addr.val << 16);
		snandc->qspi->addr2 = cpu_to_le32(op->addr.val >> 16 & 0xffff);
		snandc->qspi->cmd = cpu_to_le32(cmd);
		return qcom_spi_block_erase(snandc);
	default:
		break;
	}

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);

	snandc->regs->cmd = cpu_to_le32(cmd);
	snandc->regs->exec = cpu_to_le32(1);
	snandc->regs->addr0 = cpu_to_le32(op->addr.val);
	snandc->regs->addr1 = cpu_to_le32(0);

	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	ret = qcom_submit_descs(snandc);
	if (ret)
		dev_err(snandc->dev, "failure in submitting cmd descriptor\n");

	return ret;
}

static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
{
	int ret, val, opcode;
	bool copy = false, copy_ftr = false;

	ret = qcom_spi_send_cmdaddr(snandc, op);
	if (ret)
		return ret;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);
	opcode = op->cmd.opcode;

	switch (opcode) {
	case SPINAND_READID:
		snandc->buf_count = 4;
		qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
		copy = true;
		break;
	case SPINAND_GET_FEATURE:
		snandc->buf_count = 4;
		qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
		copy_ftr = true;
		break;
	case SPINAND_SET_FEATURE:
		snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
		qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
				   NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
		break;
	case SPINAND_PROGRAM_EXECUTE:
	case SPINAND_WRITE_EN:
	case SPINAND_RESET:
	case SPINAND_ERASE:
	case SPINAND_READ:
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
		return ret;
	}

	if (copy) {
		qcom_nandc_dev_to_mem(snandc, true);
		memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
	}

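	/*
	 * The controller returns the GET_FEATURE payload starting at bit 8
	 * of NAND_FLASH_FEATURES, so shift it down before copying the
	 * value back to the caller.
	 */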
	if (copy_ftr) {
		qcom_nandc_dev_to_mem(snandc, true);
		val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
		val >>= 8;
		memcpy(op->data.buf.in, &val, snandc->buf_count);
	}

	return 0;
}

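/*
 * Page reads/programs are recognised by their address layout: a 2-byte
 * x1 address (the page-data path) or a quad I/O transfer. Everything
 * else is executed as a plain register-level command.
 */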
static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
{
	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
		return false;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (op->addr.buswidth == 4 && op->data.buswidth == 4)
			return true;

		if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
			return true;

	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->data.buswidth == 4)
			return true;
		if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
			return true;
	}

	return false;
}

static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
		return false;

	if (qcom_spi_is_page_op(op))
		return true;

	return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
		(!op->dummy.nbytes || op->dummy.buswidth == 1) &&
		(!op->data.nbytes || op->data.buswidth == 1));
}

static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);

	dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u\n", op->cmd.opcode,
		op->addr.val, op->addr.buswidth, op->addr.nbytes,
		op->data.buswidth, op->data.nbytes);

	if (qcom_spi_is_page_op(op)) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			return qcom_spi_read_page(snandc, op);
		if (op->data.dir == SPI_MEM_DATA_OUT)
			return qcom_spi_write_page(snandc, op);
	} else {
		return qcom_spi_io_op(snandc, op);
	}

	return 0;
}

static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
	.supports_op = qcom_spi_supports_op,
	.exec_op = qcom_spi_exec_op,
};

static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
	.ecc = true,
};

static int qcom_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctlr;
	struct qcom_nand_controller *snandc;
	struct qpic_spi_nand *qspi;
	struct qpic_ecc *ecc;
	struct resource *res;
	const void *dev_data;
	int ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
	if (!qspi)
		return -ENOMEM;

	ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
	if (!ctlr)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctlr);

	snandc = spi_controller_get_devdata(ctlr);
	qspi->snandc = snandc;

	snandc->dev = dev;
	snandc->qspi = qspi;
	snandc->qspi->ctlr = ctlr;
	snandc->qspi->ecc = ecc;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	snandc->props = dev_data;
	snandc->dev = &pdev->dev;

	snandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(snandc->core_clk))
		return PTR_ERR(snandc->core_clk);

	snandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(snandc->aon_clk))
		return PTR_ERR(snandc->aon_clk);

	snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
	if (IS_ERR(snandc->qspi->iomacro_clk))
		return PTR_ERR(snandc->qspi->iomacro_clk);

	snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(snandc->base))
		return PTR_ERR(snandc->base);

	snandc->base_phys = res->start;
	snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, snandc->base_dma))
		return -ENXIO;

	ret = clk_prepare_enable(snandc->core_clk);
	if (ret)
		goto err_dis_core_clk;

	ret = clk_prepare_enable(snandc->aon_clk);
	if (ret)
		goto err_dis_aon_clk;

	ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
	if (ret)
		goto err_dis_iom_clk;

	ret = qcom_nandc_alloc(snandc);
	if (ret)
		goto err_snand_alloc;

	ret = qcom_spi_init(snandc);
	if (ret)
		goto err_spi_init;

	/* setup ECC engine */
	snandc->qspi->ecc_eng.dev = &pdev->dev;
	snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
	snandc->qspi->ecc_eng.priv = snandc;

	ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
	if (ret) {
		dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
		goto err_spi_init;
	}

	ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
	ctlr->mem_ops = &qcom_spi_mem_ops;
	ctlr->mem_caps = &qcom_spi_mem_caps;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
			  SPI_TX_QUAD | SPI_RX_QUAD;

	ret = spi_register_controller(ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed.\n");
		goto err_spi_init;
	}

	return 0;

err_spi_init:
	qcom_nandc_unalloc(snandc);
err_snand_alloc:
	clk_disable_unprepare(snandc->qspi->iomacro_clk);
err_dis_iom_clk:
	clk_disable_unprepare(snandc->aon_clk);
err_dis_aon_clk:
	clk_disable_unprepare(snandc->core_clk);
err_dis_core_clk:
	dma_unmap_resource(dev, res->start, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);
	return ret;
}

static void qcom_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	spi_unregister_controller(ctlr);

	qcom_nandc_unalloc(snandc);

	clk_disable_unprepare(snandc->aon_clk);
	clk_disable_unprepare(snandc->core_clk);
	clk_disable_unprepare(snandc->qspi->iomacro_clk);

	dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);
}

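/*
 * Illustrative devicetree usage (register address and clock specifiers
 * are placeholders, not taken from a real board file; the clock-names
 * match the devm_clk_get() calls in qcom_spi_probe()):
 *
 *	spi@79b0000 {
 *		compatible = "qcom,ipq9574-snand";
 *		reg = <0x79b0000 0x10000>;
 *		clocks = <&gcc CORE_CLK>, <&gcc AON_CLK>, <&gcc IOM_CLK>;
 *		clock-names = "core", "aon", "iom";
 *	};
 */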
static const struct qcom_nandc_props ipq9574_snandc_props = {
	.dev_cmd_reg_start = 0x7000,
	.bam_offset = 0x30000,
	.supports_bam = true,
};

static const struct of_device_id qcom_snandc_of_match[] = {
	{
		.compatible = "qcom,ipq9574-snand",
		.data = &ipq9574_snandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);

static struct platform_driver qcom_spi_driver = {
	.driver = {
		.name		= "qcom_snand",
		.of_match_table = qcom_snandc_of_match,
	},
	.probe = qcom_spi_probe,
	.remove = qcom_spi_remove,
};
module_platform_driver(qcom_spi_driver);

MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
MODULE_LICENSE("GPL");