1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4  * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5  *
6  * Copyright (C) 2005, Intec Automation Inc.
7  * Copyright (C) 2014, Freescale Semiconductor, Inc.
8  */
9 
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/math64.h>
16 #include <linux/module.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/spi-nor.h>
19 #include <linux/mutex.h>
20 #include <linux/of.h>
21 #include <linux/regulator/consumer.h>
22 #include <linux/sched/task_stack.h>
23 #include <linux/sizes.h>
24 #include <linux/slab.h>
25 #include <linux/spi/flash.h>
26 
27 #include "core.h"
28 
29 /* Define max times to check status register before we give up. */
30 
31 /*
32  * For everything but full-chip erase; probably could be much smaller, but kept
33  * around for safety for now
34  */
35 #define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)
36 
37 /*
38  * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
39  * for larger flash
40  */
41 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)
42 
43 #define SPI_NOR_MAX_ADDR_NBYTES	4
44 
45 #define SPI_NOR_SRST_SLEEP_MIN 200
46 #define SPI_NOR_SRST_SLEEP_MAX 400
47 
48 /**
49  * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
50  *			   extension type.
51  * @nor:		pointer to a 'struct spi_nor'
52  * @op:			pointer to the 'struct spi_mem_op' whose properties
53  *			need to be initialized.
54  *
55  * Right now, only "repeat" and "invert" are supported.
56  *
57  * Return: The opcode extension.
58  */
spi_nor_get_cmd_ext(const struct spi_nor * nor,const struct spi_mem_op * op)59 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
60 			      const struct spi_mem_op *op)
61 {
62 	switch (nor->cmd_ext_type) {
63 	case SPI_NOR_EXT_INVERT:
64 		return ~op->cmd.opcode;
65 
66 	case SPI_NOR_EXT_REPEAT:
67 		return op->cmd.opcode;
68 
69 	default:
70 		dev_err(nor->dev, "Unknown command extension type\n");
71 		return 0;
72 	}
73 }
74 
75 /**
76  * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
77  * @nor:		pointer to a 'struct spi_nor'
78  * @op:			pointer to the 'struct spi_mem_op' whose properties
79  *			need to be initialized.
80  * @proto:		the protocol from which the properties need to be set.
81  */
spi_nor_spimem_setup_op(const struct spi_nor * nor,struct spi_mem_op * op,const enum spi_nor_protocol proto)82 void spi_nor_spimem_setup_op(const struct spi_nor *nor,
83 			     struct spi_mem_op *op,
84 			     const enum spi_nor_protocol proto)
85 {
86 	u8 ext;
87 
88 	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
89 
90 	if (op->addr.nbytes)
91 		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
92 
93 	if (op->dummy.nbytes)
94 		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
95 
96 	if (op->data.nbytes)
97 		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
98 
99 	if (spi_nor_protocol_is_dtr(proto)) {
100 		/*
101 		 * SPIMEM supports mixed DTR modes, but right now we can only
102 		 * have all phases either DTR or STR. IOW, SPIMEM can have
103 		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
104 		 * phases to either DTR or STR.
105 		 */
106 		op->cmd.dtr = true;
107 		op->addr.dtr = true;
108 		op->dummy.dtr = true;
109 		op->data.dtr = true;
110 
111 		/* 2 bytes per clock cycle in DTR mode. */
112 		op->dummy.nbytes *= 2;
113 
114 		ext = spi_nor_get_cmd_ext(nor, op);
115 		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
116 		op->cmd.nbytes = 2;
117 	}
118 
119 	if (proto == SNOR_PROTO_8_8_8_DTR && nor->flags & SNOR_F_SWAP16)
120 		op->data.swap16 = true;
121 }
122 
123 /**
124  * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
125  *                           transfer
126  * @nor:        pointer to 'struct spi_nor'
127  * @op:         pointer to 'struct spi_mem_op' template for transfer
128  *
129  * If we have to use the bounce buffer, the data field in @op will be updated.
130  *
131  * Return: true if the bounce buffer is needed, false if not
132  */
spi_nor_spimem_bounce(struct spi_nor * nor,struct spi_mem_op * op)133 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
134 {
135 	/* op->data.buf.in occupies the same memory as op->data.buf.out */
136 	if (object_is_on_stack(op->data.buf.in) ||
137 	    !virt_addr_valid(op->data.buf.in)) {
138 		if (op->data.nbytes > nor->bouncebuf_size)
139 			op->data.nbytes = nor->bouncebuf_size;
140 		op->data.buf.in = nor->bouncebuf;
141 		return true;
142 	}
143 
144 	return false;
145 }
146 
147 /**
148  * spi_nor_spimem_exec_op() - execute a memory operation
149  * @nor:        pointer to 'struct spi_nor'
150  * @op:         pointer to 'struct spi_mem_op' template for transfer
151  *
152  * Return: 0 on success, -error otherwise.
153  */
spi_nor_spimem_exec_op(struct spi_nor * nor,struct spi_mem_op * op)154 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
155 {
156 	int error;
157 
158 	error = spi_mem_adjust_op_size(nor->spimem, op);
159 	if (error)
160 		return error;
161 
162 	return spi_mem_exec_op(nor->spimem, op);
163 }
164 
/*
 * Read a register through the legacy controller_ops interface, which cannot
 * express DTR transfers.
 */
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
				    u8 *buf, size_t len)
{
	const struct spi_nor_controller_ops *ops = nor->controller_ops;

	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return ops->read_reg(nor, opcode, buf, len);
}
173 
/*
 * Write a register through the legacy controller_ops interface, which cannot
 * express DTR transfers.
 */
int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
				     const u8 *buf, size_t len)
{
	const struct spi_nor_controller_ops *ops = nor->controller_ops;

	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return ops->write_reg(nor, opcode, buf, len);
}
182 
/*
 * Erase through the legacy controller_ops interface, which cannot express
 * DTR transfers.
 */
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	const struct spi_nor_controller_ops *ops = nor->controller_ops;

	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return ops->erase(nor, offs);
}
190 
191 /**
192  * spi_nor_spimem_read_data() - read data from flash's memory region via
193  *                              spi-mem
194  * @nor:        pointer to 'struct spi_nor'
195  * @from:       offset to read from
196  * @len:        number of bytes to read
197  * @buf:        pointer to dst buffer
198  *
199  * Return: number of bytes read successfully, -errno otherwise
200  */
spi_nor_spimem_read_data(struct spi_nor * nor,loff_t from,size_t len,u8 * buf)201 static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
202 					size_t len, u8 *buf)
203 {
204 	struct spi_mem_op op =
205 		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
206 			   SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
207 			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
208 			   SPI_MEM_OP_DATA_IN(len, buf, 0));
209 	bool usebouncebuf;
210 	ssize_t nbytes;
211 	int error;
212 
213 	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
214 
215 	/* convert the dummy cycles to the number of bytes */
216 	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
217 	if (spi_nor_protocol_is_dtr(nor->read_proto))
218 		op.dummy.nbytes *= 2;
219 
220 	usebouncebuf = spi_nor_spimem_bounce(nor, &op);
221 
222 	if (nor->dirmap.rdesc) {
223 		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
224 					     op.data.nbytes, op.data.buf.in);
225 	} else {
226 		error = spi_nor_spimem_exec_op(nor, &op);
227 		if (error)
228 			return error;
229 		nbytes = op.data.nbytes;
230 	}
231 
232 	if (usebouncebuf && nbytes > 0)
233 		memcpy(buf, op.data.buf.in, nbytes);
234 
235 	return nbytes;
236 }
237 
238 /**
239  * spi_nor_read_data() - read data from flash memory
240  * @nor:        pointer to 'struct spi_nor'
241  * @from:       offset to read from
242  * @len:        number of bytes to read
243  * @buf:        pointer to dst buffer
244  *
245  * Return: number of bytes read successfully, -errno otherwise
246  */
spi_nor_read_data(struct spi_nor * nor,loff_t from,size_t len,u8 * buf)247 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
248 {
249 	if (nor->spimem)
250 		return spi_nor_spimem_read_data(nor, from, len, buf);
251 
252 	return nor->controller_ops->read(nor, from, len, buf);
253 }
254 
255 /**
256  * spi_nor_spimem_write_data() - write data to flash memory via
257  *                               spi-mem
258  * @nor:        pointer to 'struct spi_nor'
259  * @to:         offset to write to
260  * @len:        number of bytes to write
261  * @buf:        pointer to src buffer
262  *
263  * Return: number of bytes written successfully, -errno otherwise
264  */
spi_nor_spimem_write_data(struct spi_nor * nor,loff_t to,size_t len,const u8 * buf)265 static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
266 					 size_t len, const u8 *buf)
267 {
268 	struct spi_mem_op op =
269 		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
270 			   SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
271 			   SPI_MEM_OP_NO_DUMMY,
272 			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
273 	ssize_t nbytes;
274 	int error;
275 
276 	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
277 		op.addr.nbytes = 0;
278 
279 	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
280 
281 	if (spi_nor_spimem_bounce(nor, &op))
282 		memcpy(nor->bouncebuf, buf, op.data.nbytes);
283 
284 	if (nor->dirmap.wdesc) {
285 		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
286 					      op.data.nbytes, op.data.buf.out);
287 	} else {
288 		error = spi_nor_spimem_exec_op(nor, &op);
289 		if (error)
290 			return error;
291 		nbytes = op.data.nbytes;
292 	}
293 
294 	return nbytes;
295 }
296 
297 /**
298  * spi_nor_write_data() - write data to flash memory
299  * @nor:        pointer to 'struct spi_nor'
300  * @to:         offset to write to
301  * @len:        number of bytes to write
302  * @buf:        pointer to src buffer
303  *
304  * Return: number of bytes written successfully, -errno otherwise
305  */
spi_nor_write_data(struct spi_nor * nor,loff_t to,size_t len,const u8 * buf)306 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
307 			   const u8 *buf)
308 {
309 	if (nor->spimem)
310 		return spi_nor_spimem_write_data(nor, to, len, buf);
311 
312 	return nor->controller_ops->write(nor, to, len, buf);
313 }
314 
315 /**
316  * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
317  * volatile.
318  * @nor:        pointer to 'struct spi_nor'.
319  * @op:		SPI memory operation. op->data.buf must be DMA-able.
320  * @proto:	SPI protocol to use for the register operation.
321  *
322  * Return: zero on success, -errno otherwise
323  */
spi_nor_read_any_reg(struct spi_nor * nor,struct spi_mem_op * op,enum spi_nor_protocol proto)324 int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
325 			 enum spi_nor_protocol proto)
326 {
327 	if (!nor->spimem)
328 		return -EOPNOTSUPP;
329 
330 	spi_nor_spimem_setup_op(nor, op, proto);
331 	return spi_nor_spimem_exec_op(nor, op);
332 }
333 
334 /**
335  * spi_nor_write_any_volatile_reg() - write any volatile register to flash
336  * memory.
337  * @nor:        pointer to 'struct spi_nor'
338  * @op:		SPI memory operation. op->data.buf must be DMA-able.
339  * @proto:	SPI protocol to use for the register operation.
340  *
341  * Writing volatile registers are instant according to some manufacturers
342  * (Cypress, Micron) and do not need any status polling.
343  *
344  * Return: zero on success, -errno otherwise
345  */
spi_nor_write_any_volatile_reg(struct spi_nor * nor,struct spi_mem_op * op,enum spi_nor_protocol proto)346 int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
347 				   enum spi_nor_protocol proto)
348 {
349 	int ret;
350 
351 	if (!nor->spimem)
352 		return -EOPNOTSUPP;
353 
354 	ret = spi_nor_write_enable(nor);
355 	if (ret)
356 		return ret;
357 	spi_nor_spimem_setup_op(nor, op, proto);
358 	return spi_nor_spimem_exec_op(nor, op);
359 }
360 
361 /**
362  * spi_nor_write_enable() - Set write enable latch with Write Enable command.
363  * @nor:	pointer to 'struct spi_nor'.
364  *
365  * Return: 0 on success, -errno otherwise.
366  */
spi_nor_write_enable(struct spi_nor * nor)367 int spi_nor_write_enable(struct spi_nor *nor)
368 {
369 	int ret;
370 
371 	if (nor->spimem) {
372 		struct spi_mem_op op = SPI_NOR_WREN_OP;
373 
374 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
375 
376 		ret = spi_mem_exec_op(nor->spimem, &op);
377 	} else {
378 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
379 						       NULL, 0);
380 	}
381 
382 	if (ret)
383 		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
384 
385 	return ret;
386 }
387 
388 /**
389  * spi_nor_write_disable() - Send Write Disable instruction to the chip.
390  * @nor:	pointer to 'struct spi_nor'.
391  *
392  * Return: 0 on success, -errno otherwise.
393  */
spi_nor_write_disable(struct spi_nor * nor)394 int spi_nor_write_disable(struct spi_nor *nor)
395 {
396 	int ret;
397 
398 	if (nor->spimem) {
399 		struct spi_mem_op op = SPI_NOR_WRDI_OP;
400 
401 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
402 
403 		ret = spi_mem_exec_op(nor->spimem, &op);
404 	} else {
405 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
406 						       NULL, 0);
407 	}
408 
409 	if (ret)
410 		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
411 
412 	return ret;
413 }
414 
415 /**
416  * spi_nor_read_id() - Read the JEDEC ID.
417  * @nor:	pointer to 'struct spi_nor'.
418  * @naddr:	number of address bytes to send. Can be zero if the operation
419  *		does not need to send an address.
420  * @ndummy:	number of dummy bytes to send after an opcode or address. Can
421  *		be zero if the operation does not require dummy bytes.
422  * @id:		pointer to a DMA-able buffer where the value of the JEDEC ID
423  *		will be written.
424  * @proto:	the SPI protocol for register operation.
425  *
426  * Return: 0 on success, -errno otherwise.
427  */
spi_nor_read_id(struct spi_nor * nor,u8 naddr,u8 ndummy,u8 * id,enum spi_nor_protocol proto)428 int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
429 		    enum spi_nor_protocol proto)
430 {
431 	int ret;
432 
433 	if (nor->spimem) {
434 		struct spi_mem_op op =
435 			SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
436 
437 		spi_nor_spimem_setup_op(nor, &op, proto);
438 		ret = spi_mem_exec_op(nor->spimem, &op);
439 	} else {
440 		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
441 						    SPI_NOR_MAX_ID_LEN);
442 	}
443 	return ret;
444 }
445 
446 /**
447  * spi_nor_read_sr() - Read the Status Register.
448  * @nor:	pointer to 'struct spi_nor'.
449  * @sr:		pointer to a DMA-able buffer where the value of the
450  *              Status Register will be written. Should be at least 2 bytes.
451  *
452  * Return: 0 on success, -errno otherwise.
453  */
spi_nor_read_sr(struct spi_nor * nor,u8 * sr)454 int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
455 {
456 	int ret;
457 
458 	if (nor->spimem) {
459 		struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
460 
461 		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
462 			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
463 			op.dummy.nbytes = nor->params->rdsr_dummy;
464 			/*
465 			 * We don't want to read only one byte in DTR mode. So,
466 			 * read 2 and then discard the second byte.
467 			 */
468 			op.data.nbytes = 2;
469 		}
470 
471 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
472 
473 		ret = spi_mem_exec_op(nor->spimem, &op);
474 	} else {
475 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
476 						      1);
477 	}
478 
479 	if (ret)
480 		dev_dbg(nor->dev, "error %d reading SR\n", ret);
481 
482 	return ret;
483 }
484 
485 /**
486  * spi_nor_read_cr() - Read the Configuration Register using the
487  * SPINOR_OP_RDCR (35h) command.
488  * @nor:	pointer to 'struct spi_nor'
489  * @cr:		pointer to a DMA-able buffer where the value of the
490  *              Configuration Register will be written.
491  *
492  * Return: 0 on success, -errno otherwise.
493  */
spi_nor_read_cr(struct spi_nor * nor,u8 * cr)494 int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
495 {
496 	int ret;
497 
498 	if (nor->spimem) {
499 		struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
500 
501 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
502 
503 		ret = spi_mem_exec_op(nor->spimem, &op);
504 	} else {
505 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
506 						      1);
507 	}
508 
509 	if (ret)
510 		dev_dbg(nor->dev, "error %d reading CR\n", ret);
511 
512 	return ret;
513 }
514 
515 /**
516  * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
517  *			using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
518  *			Winbond and Macronix.
519  * @nor:	pointer to 'struct spi_nor'.
520  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
521  *		address mode.
522  *
523  * Return: 0 on success, -errno otherwise.
524  */
spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor * nor,bool enable)525 int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
526 {
527 	int ret;
528 
529 	if (nor->spimem) {
530 		struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
531 
532 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
533 
534 		ret = spi_mem_exec_op(nor->spimem, &op);
535 	} else {
536 		ret = spi_nor_controller_ops_write_reg(nor,
537 						       enable ? SPINOR_OP_EN4B :
538 								SPINOR_OP_EX4B,
539 						       NULL, 0);
540 	}
541 
542 	if (ret)
543 		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
544 
545 	return ret;
546 }
547 
548 /**
549  * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
550  * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
551  * by ST and Micron flashes.
552  * @nor:	pointer to 'struct spi_nor'.
553  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
554  *		address mode.
555  *
556  * Return: 0 on success, -errno otherwise.
557  */
spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor * nor,bool enable)558 int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
559 {
560 	int ret;
561 
562 	ret = spi_nor_write_enable(nor);
563 	if (ret)
564 		return ret;
565 
566 	ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
567 	if (ret)
568 		return ret;
569 
570 	return spi_nor_write_disable(nor);
571 }
572 
573 /**
574  * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
575  *			SPINOR_OP_BRWR. Typically used by Spansion flashes.
576  * @nor:	pointer to 'struct spi_nor'.
577  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
578  *		address mode.
579  *
580  * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
581  * used to enable/disable 4-byte address mode. When MSB is set to ‘1’, 4-byte
582  * address mode is active and A[30:24] bits are don’t care. Write instruction is
583  * SPINOR_OP_BRWR(17h) with 1 byte of data.
584  *
585  * Return: 0 on success, -errno otherwise.
586  */
spi_nor_set_4byte_addr_mode_brwr(struct spi_nor * nor,bool enable)587 int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
588 {
589 	int ret;
590 
591 	nor->bouncebuf[0] = enable << 7;
592 
593 	if (nor->spimem) {
594 		struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
595 
596 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
597 
598 		ret = spi_mem_exec_op(nor->spimem, &op);
599 	} else {
600 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
601 						       nor->bouncebuf, 1);
602 	}
603 
604 	if (ret)
605 		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
606 
607 	return ret;
608 }
609 
610 /**
611  * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
612  * for new commands.
613  * @nor:	pointer to 'struct spi_nor'.
614  *
615  * Return: 1 if ready, 0 if not ready, -errno on errors.
616  */
spi_nor_sr_ready(struct spi_nor * nor)617 int spi_nor_sr_ready(struct spi_nor *nor)
618 {
619 	int ret;
620 
621 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
622 	if (ret)
623 		return ret;
624 
625 	return !(nor->bouncebuf[0] & SR_WIP);
626 }
627 
628 /**
629  * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
630  * @nor:	pointer to 'struct spi_nor'.
631  *
632  * Return: true if parallel locking is enabled, false otherwise.
633  */
spi_nor_use_parallel_locking(struct spi_nor * nor)634 static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
635 {
636 	return nor->flags & SNOR_F_RWW;
637 }
638 
/* Locking helpers for status read operations */
/*
 * Try to claim the flash for a status read under the RWW scheme. Takes
 * nor->lock only for the flag update (released on return via guard()).
 * Returns -EAGAIN when an I/O or read is already in flight, so the caller
 * must retry later.
 */
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd)
		return -EAGAIN;

	rww->ongoing_io = true;
	rww->ongoing_rd = true;

	return 0;
}
654 
/*
 * Release the RWW claim taken by spi_nor_rww_start_rdst(). Clears both flags
 * under nor->lock; waking up waiters is the caller's responsibility.
 */
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;
}
664 
/*
 * Claim the flash for a status read when the RWW locking scheme is in use;
 * a no-op (success) otherwise.
 */
static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	return spi_nor_rww_start_rdst(nor);
}
672 
spi_nor_unlock_rdst(struct spi_nor * nor)673 static void spi_nor_unlock_rdst(struct spi_nor *nor)
674 {
675 	if (spi_nor_use_parallel_locking(nor)) {
676 		spi_nor_rww_end_rdst(nor);
677 		wake_up(&nor->rww.wait);
678 	}
679 }
680 
681 /**
682  * spi_nor_ready() - Query the flash to see if it is ready for new commands.
683  * @nor:	pointer to 'struct spi_nor'.
684  *
685  * Return: 1 if ready, 0 if not ready, -errno on errors.
686  */
spi_nor_ready(struct spi_nor * nor)687 static int spi_nor_ready(struct spi_nor *nor)
688 {
689 	int ret;
690 
691 	ret = spi_nor_lock_rdst(nor);
692 	if (ret)
693 		return 0;
694 
695 	/* Flashes might override the standard routine. */
696 	if (nor->params->ready)
697 		ret = nor->params->ready(nor);
698 	else
699 		ret = spi_nor_sr_ready(nor);
700 
701 	spi_nor_unlock_rdst(nor);
702 
703 	return ret;
704 }
705 
706 /**
707  * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
708  * Status Register until ready, or timeout occurs.
709  * @nor:		pointer to "struct spi_nor".
710  * @timeout_jiffies:	jiffies to wait until timeout.
711  *
712  * Return: 0 on success, -errno otherwise.
713  */
spi_nor_wait_till_ready_with_timeout(struct spi_nor * nor,unsigned long timeout_jiffies)714 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
715 						unsigned long timeout_jiffies)
716 {
717 	unsigned long deadline;
718 	int timeout = 0, ret;
719 
720 	deadline = jiffies + timeout_jiffies;
721 
722 	while (!timeout) {
723 		if (time_after_eq(jiffies, deadline))
724 			timeout = 1;
725 
726 		ret = spi_nor_ready(nor);
727 		if (ret < 0)
728 			return ret;
729 		if (ret)
730 			return 0;
731 
732 		cond_resched();
733 	}
734 
735 	dev_dbg(nor->dev, "flash operation timed out\n");
736 
737 	return -ETIMEDOUT;
738 }
739 
740 /**
741  * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
742  * flash to be ready, or timeout occurs.
743  * @nor:	pointer to "struct spi_nor".
744  *
745  * Return: 0 on success, -errno otherwise.
746  */
spi_nor_wait_till_ready(struct spi_nor * nor)747 int spi_nor_wait_till_ready(struct spi_nor *nor)
748 {
749 	return spi_nor_wait_till_ready_with_timeout(nor,
750 						    DEFAULT_READY_WAIT_JIFFIES);
751 }
752 
753 /**
754  * spi_nor_global_block_unlock() - Unlock Global Block Protection.
755  * @nor:	pointer to 'struct spi_nor'.
756  *
757  * Return: 0 on success, -errno otherwise.
758  */
spi_nor_global_block_unlock(struct spi_nor * nor)759 int spi_nor_global_block_unlock(struct spi_nor *nor)
760 {
761 	int ret;
762 
763 	ret = spi_nor_write_enable(nor);
764 	if (ret)
765 		return ret;
766 
767 	if (nor->spimem) {
768 		struct spi_mem_op op = SPI_NOR_GBULK_OP;
769 
770 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
771 
772 		ret = spi_mem_exec_op(nor->spimem, &op);
773 	} else {
774 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
775 						       NULL, 0);
776 	}
777 
778 	if (ret) {
779 		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
780 		return ret;
781 	}
782 
783 	return spi_nor_wait_till_ready(nor);
784 }
785 
786 /**
787  * spi_nor_write_sr() - Write the Status Register.
788  * @nor:	pointer to 'struct spi_nor'.
789  * @sr:		pointer to DMA-able buffer to write to the Status Register.
790  * @len:	number of bytes to write to the Status Register.
791  *
792  * Return: 0 on success, -errno otherwise.
793  */
spi_nor_write_sr(struct spi_nor * nor,const u8 * sr,size_t len)794 int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
795 {
796 	int ret;
797 
798 	ret = spi_nor_write_enable(nor);
799 	if (ret)
800 		return ret;
801 
802 	if (nor->spimem) {
803 		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
804 
805 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
806 
807 		ret = spi_mem_exec_op(nor->spimem, &op);
808 	} else {
809 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
810 						       len);
811 	}
812 
813 	if (ret) {
814 		dev_dbg(nor->dev, "error %d writing SR\n", ret);
815 		return ret;
816 	}
817 
818 	return spi_nor_wait_till_ready(nor);
819 }
820 
821 /**
822  * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
823  * ensure that the byte written match the received value.
824  * @nor:	pointer to a 'struct spi_nor'.
825  * @sr1:	byte value to be written to the Status Register.
826  *
827  * Return: 0 on success, -errno otherwise.
828  */
spi_nor_write_sr1_and_check(struct spi_nor * nor,u8 sr1)829 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
830 {
831 	int ret;
832 
833 	nor->bouncebuf[0] = sr1;
834 
835 	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
836 	if (ret)
837 		return ret;
838 
839 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
840 	if (ret)
841 		return ret;
842 
843 	if (nor->bouncebuf[0] != sr1) {
844 		dev_dbg(nor->dev, "SR1: read back test failed\n");
845 		return -EIO;
846 	}
847 
848 	return 0;
849 }
850 
851 /**
852  * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
853  * Status Register 2 in one shot. Ensure that the byte written in the Status
854  * Register 1 match the received value, and that the 16-bit Write did not
855  * affect what was already in the Status Register 2.
856  * @nor:	pointer to a 'struct spi_nor'.
857  * @sr1:	byte value to be written to the Status Register 1.
858  *
859  * Return: 0 on success, -errno otherwise.
860  */
spi_nor_write_16bit_sr_and_check(struct spi_nor * nor,u8 sr1)861 static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
862 {
863 	int ret;
864 	u8 *sr_cr = nor->bouncebuf;
865 	u8 cr_written;
866 
867 	/* Make sure we don't overwrite the contents of Status Register 2. */
868 	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
869 		ret = spi_nor_read_cr(nor, &sr_cr[1]);
870 		if (ret)
871 			return ret;
872 	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
873 		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
874 		   nor->params->quad_enable) {
875 		/*
876 		 * If the Status Register 2 Read command (35h) is not
877 		 * supported, we should at least be sure we don't
878 		 * change the value of the SR2 Quad Enable bit.
879 		 *
880 		 * When the Quad Enable method is set and the buswidth is 4, we
881 		 * can safely assume that the value of the QE bit is one, as a
882 		 * consequence of the nor->params->quad_enable() call.
883 		 *
884 		 * According to the JESD216 revB standard, BFPT DWORDS[15],
885 		 * bits 22:20, the 16-bit Write Status (01h) command is
886 		 * available just for the cases in which the QE bit is
887 		 * described in SR2 at BIT(1).
888 		 */
889 		sr_cr[1] = SR2_QUAD_EN_BIT1;
890 	} else {
891 		sr_cr[1] = 0;
892 	}
893 
894 	sr_cr[0] = sr1;
895 
896 	ret = spi_nor_write_sr(nor, sr_cr, 2);
897 	if (ret)
898 		return ret;
899 
900 	ret = spi_nor_read_sr(nor, sr_cr);
901 	if (ret)
902 		return ret;
903 
904 	if (sr1 != sr_cr[0]) {
905 		dev_dbg(nor->dev, "SR: Read back test failed\n");
906 		return -EIO;
907 	}
908 
909 	if (nor->flags & SNOR_F_NO_READ_CR)
910 		return 0;
911 
912 	cr_written = sr_cr[1];
913 
914 	ret = spi_nor_read_cr(nor, &sr_cr[1]);
915 	if (ret)
916 		return ret;
917 
918 	if (cr_written != sr_cr[1]) {
919 		dev_dbg(nor->dev, "CR: read back test failed\n");
920 		return -EIO;
921 	}
922 
923 	return 0;
924 }
925 
926 /**
927  * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
928  * Configuration Register in one shot. Ensure that the byte written in the
929  * Configuration Register match the received value, and that the 16-bit Write
930  * did not affect what was already in the Status Register 1.
931  * @nor:	pointer to a 'struct spi_nor'.
932  * @cr:		byte value to be written to the Configuration Register.
933  *
934  * Return: 0 on success, -errno otherwise.
935  */
spi_nor_write_16bit_cr_and_check(struct spi_nor * nor,u8 cr)936 int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
937 {
938 	int ret;
939 	u8 *sr_cr = nor->bouncebuf;
940 	u8 sr_written;
941 
942 	/* Keep the current value of the Status Register 1. */
943 	ret = spi_nor_read_sr(nor, sr_cr);
944 	if (ret)
945 		return ret;
946 
947 	sr_cr[1] = cr;
948 
949 	ret = spi_nor_write_sr(nor, sr_cr, 2);
950 	if (ret)
951 		return ret;
952 
953 	sr_written = sr_cr[0];
954 
955 	ret = spi_nor_read_sr(nor, sr_cr);
956 	if (ret)
957 		return ret;
958 
959 	if (sr_written != sr_cr[0]) {
960 		dev_dbg(nor->dev, "SR: Read back test failed\n");
961 		return -EIO;
962 	}
963 
964 	if (nor->flags & SNOR_F_NO_READ_CR)
965 		return 0;
966 
967 	ret = spi_nor_read_cr(nor, &sr_cr[1]);
968 	if (ret)
969 		return ret;
970 
971 	if (cr != sr_cr[1]) {
972 		dev_dbg(nor->dev, "CR: read back test failed\n");
973 		return -EIO;
974 	}
975 
976 	return 0;
977 }
978 
979 /**
980  * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
981  * the byte written match the received value without affecting other bits in the
982  * Status Register 1 and 2.
983  * @nor:	pointer to a 'struct spi_nor'.
984  * @sr1:	byte value to be written to the Status Register.
985  *
986  * Return: 0 on success, -errno otherwise.
987  */
spi_nor_write_sr_and_check(struct spi_nor * nor,u8 sr1)988 int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
989 {
990 	if (nor->flags & SNOR_F_HAS_16BIT_SR)
991 		return spi_nor_write_16bit_sr_and_check(nor, sr1);
992 
993 	return spi_nor_write_sr1_and_check(nor, sr1);
994 }
995 
996 /**
997  * spi_nor_write_sr2() - Write the Status Register 2 using the
998  * SPINOR_OP_WRSR2 (3eh) command.
999  * @nor:	pointer to 'struct spi_nor'.
1000  * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
1001  *
1002  * Return: 0 on success, -errno otherwise.
1003  */
spi_nor_write_sr2(struct spi_nor * nor,const u8 * sr2)1004 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
1005 {
1006 	int ret;
1007 
1008 	ret = spi_nor_write_enable(nor);
1009 	if (ret)
1010 		return ret;
1011 
1012 	if (nor->spimem) {
1013 		struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
1014 
1015 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1016 
1017 		ret = spi_mem_exec_op(nor->spimem, &op);
1018 	} else {
1019 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
1020 						       sr2, 1);
1021 	}
1022 
1023 	if (ret) {
1024 		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
1025 		return ret;
1026 	}
1027 
1028 	return spi_nor_wait_till_ready(nor);
1029 }
1030 
1031 /**
1032  * spi_nor_read_sr2() - Read the Status Register 2 using the
1033  * SPINOR_OP_RDSR2 (3fh) command.
1034  * @nor:	pointer to 'struct spi_nor'.
1035  * @sr2:	pointer to DMA-able buffer where the value of the
1036  *		Status Register 2 will be written.
1037  *
1038  * Return: 0 on success, -errno otherwise.
1039  */
spi_nor_read_sr2(struct spi_nor * nor,u8 * sr2)1040 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1041 {
1042 	int ret;
1043 
1044 	if (nor->spimem) {
1045 		struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
1046 
1047 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1048 
1049 		ret = spi_mem_exec_op(nor->spimem, &op);
1050 	} else {
1051 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
1052 						      1);
1053 	}
1054 
1055 	if (ret)
1056 		dev_dbg(nor->dev, "error %d reading SR2\n", ret);
1057 
1058 	return ret;
1059 }
1060 
/**
 * spi_nor_erase_die() - Erase the entire die.
 * @nor:	pointer to 'struct spi_nor'.
 * @addr:	address of the die.
 * @die_size:	size of the die.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
	/* A die smaller than the whole flash means a multi-die package. */
	bool multi_die = nor->mtd.size != die_size;
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
					     nor->addr_nbytes, addr, multi_die);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller ops can only issue a whole-chip erase. */
		if (multi_die)
			return -EOPNOTSUPP;

		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}
1098 
/* Look @opcode up in a {from, to} conversion @table of @size entries. */
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t idx;

	/* Linear scan is fine: the conversion tables are tiny. */
	for (idx = 0; idx < size; idx++) {
		if (table[idx][0] == opcode)
			return table[idx][1];
	}

	/* No conversion found, keep input op code. */
	return opcode;
}
1110 
/* Map a 3-byte-address read opcode to its 4-byte-address equivalent. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		/* DTR (double transfer rate) read variants. */
		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1131 
/* Map a 3-byte-address page program opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1145 
/* Map a 3-byte-address erase opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1157 
/* True if the flash is erased with a single, uniform sector size. */
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	/* A non-zero erase mask on the uniform region marks uniform erase. */
	return !!nor->params->erase_map.uniform_region.erase_mask;
}
1162 
/* Switch the read/program/erase opcodes to their 4-byte address variants. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		/* Non-uniform maps carry per-type opcodes: convert them too. */
		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
1181 
spi_nor_prep(struct spi_nor * nor)1182 static int spi_nor_prep(struct spi_nor *nor)
1183 {
1184 	int ret = 0;
1185 
1186 	if (nor->controller_ops && nor->controller_ops->prepare)
1187 		ret = nor->controller_ops->prepare(nor);
1188 
1189 	return ret;
1190 }
1191 
/* Run the optional controller-specific unprepare hook; pairs with spi_nor_prep(). */
static void spi_nor_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
}
1197 
/*
 * Map the byte range [@start, @start + @len) onto bank indices:
 * @first receives the bank holding the first byte, @last the bank
 * holding the last byte of the range.
 */
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
				    u8 *first, u8 *last)
{
	/* This is currently safe, the number of banks being very small */
	*first = DIV_ROUND_DOWN_ULL(start, bank_size);
	*last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
}
1205 
1206 /* Generic helpers for internal locking and serialization */
spi_nor_rww_start_io(struct spi_nor * nor)1207 static bool spi_nor_rww_start_io(struct spi_nor *nor)
1208 {
1209 	struct spi_nor_rww *rww = &nor->rww;
1210 
1211 	guard(mutex)(&nor->lock);
1212 
1213 	if (rww->ongoing_io)
1214 		return false;
1215 
1216 	rww->ongoing_io = true;
1217 
1218 	return true;
1219 }
1220 
/* Release the claim taken by spi_nor_rww_start_io(). */
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
	guard(mutex)(&nor->lock);
	nor->rww.ongoing_io = false;
}
1226 
/*
 * Acquire the device for one flash operation. A no-op unless parallel
 * (read-while-write) locking is in use; then waits, killably, until the
 * ongoing-I/O claim can be taken.
 */
static int spi_nor_lock_device(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	/* wait_event_killable() returns -ERESTARTSYS on a fatal signal. */
	return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
}
1234 
spi_nor_unlock_device(struct spi_nor * nor)1235 static void spi_nor_unlock_device(struct spi_nor *nor)
1236 {
1237 	if (spi_nor_use_parallel_locking(nor)) {
1238 		spi_nor_rww_end_io(nor);
1239 		wake_up(&nor->rww.wait);
1240 	}
1241 }
1242 
1243 /* Generic helpers for internal locking and serialization */
spi_nor_rww_start_exclusive(struct spi_nor * nor)1244 static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
1245 {
1246 	struct spi_nor_rww *rww = &nor->rww;
1247 
1248 	mutex_lock(&nor->lock);
1249 
1250 	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
1251 		return false;
1252 
1253 	rww->ongoing_io = true;
1254 	rww->ongoing_rd = true;
1255 	rww->ongoing_pe = true;
1256 
1257 	return true;
1258 }
1259 
/* Drop the exclusive claim taken by spi_nor_rww_start_exclusive(). */
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);
	rww->ongoing_io = false;
	rww->ongoing_rd = false;
	rww->ongoing_pe = false;
}
1269 
/* Prepare the controller and take exclusive access to the flash. */
int spi_nor_prep_and_lock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		/* Killable wait until the exclusive claim can be taken. */
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_exclusive(nor));

	return ret;
}
1286 
spi_nor_unlock_and_unprep(struct spi_nor * nor)1287 void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1288 {
1289 	if (!spi_nor_use_parallel_locking(nor)) {
1290 		mutex_unlock(&nor->lock);
1291 	} else {
1292 		spi_nor_rww_end_exclusive(nor);
1293 		wake_up(&nor->rww.wait);
1294 	}
1295 
1296 	spi_nor_unprep(nor);
1297 }
1298 
/* Internal locking helpers for program and erase operations */

/* Try to claim the banks in [@start, @start + @len) for a program/erase op. */
static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	/* Back off while any exclusive, read, or program/erase op runs. */
	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
		return false;

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		/* All-or-nothing: any busy bank aborts the whole claim. */
		if (rww->used_banks & BIT(bank))
			return false;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_pe = true;

	return true;
}
1325 
/* Release the banks claimed by spi_nor_rww_start_pe(). */
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		rww->used_banks &= ~BIT(bank);

	rww->ongoing_pe = false;
}
1340 
/* Prepare the controller and lock the range for a program/erase op. */
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		return wait_event_killable(nor->rww.wait,
					   spi_nor_rww_start_pe(nor, start, len));

	mutex_lock(&nor->lock);
	return 0;
}
1357 
/* Counterpart of spi_nor_prep_and_lock_pe(): unlock the range, unprepare. */
static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_pe(nor, start, len);
		wake_up(&nor->rww.wait);
	} else {
		mutex_unlock(&nor->lock);
	}

	spi_nor_unprep(nor);
}
1369 
/* Internal locking helpers for read operations */

/* Try to claim the banks in [@start, @start + @len) for a read op. */
static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	/* Reads may overlap program/erase, but not other I/O or reads. */
	if (rww->ongoing_io || rww->ongoing_rd)
		return false;

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		/* All-or-nothing: any busy bank aborts the whole claim. */
		if (rww->used_banks & BIT(bank))
			return false;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_io = true;
	rww->ongoing_rd = true;

	return true;
}
1397 
/* Release the banks claimed by spi_nor_rww_start_rd(). */
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		/* Use the local alias consistently (was nor->rww.used_banks). */
		rww->used_banks &= ~BIT(bank);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;
}
1413 
/* Prepare the controller and lock the range for a read op. */
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		return wait_event_killable(nor->rww.wait,
					   spi_nor_rww_start_rd(nor, start, len));

	mutex_lock(&nor->lock);
	return 0;
}
1430 
/* Counterpart of spi_nor_prep_and_lock_rd(): unlock the range, unprepare. */
static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_rd(nor, start, len);
		wake_up(&nor->rww.wait);
	} else {
		mutex_unlock(&nor->lock);
	}

	spi_nor_unprep(nor);
}
1442 
/*
 * Initiate the erasure of a single sector
 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
						nor->addr_nbytes, addr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	/* Serialize the address big-endian into the bounce buffer. */
	for (i = nor->addr_nbytes - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	/* Send the erase opcode with the address bytes as "register" data. */
	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_nbytes);
}
1474 
/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	/* Mask/shift instead of div/mod, valid only for power-of-2 sizes. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
1490 
/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	/* Walk from largest to smallest so the biggest fitting type wins. */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(region->erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];
		if (!erase->size)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->overlaid && region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* The start address must be aligned to the erase size. */
		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}
1541 
1542 /**
1543  * spi_nor_init_erase_cmd() - initialize an erase command
1544  * @region:	pointer to a structure that describes a SPI NOR erase region
1545  * @erase:	pointer to a structure that describes a SPI NOR erase type
1546  *
1547  * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1548  *	   otherwise.
1549  */
1550 static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region * region,const struct spi_nor_erase_type * erase)1551 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1552 		       const struct spi_nor_erase_type *erase)
1553 {
1554 	struct spi_nor_erase_command *cmd;
1555 
1556 	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1557 	if (!cmd)
1558 		return ERR_PTR(-ENOMEM);
1559 
1560 	INIT_LIST_HEAD(&cmd->list);
1561 	cmd->opcode = erase->opcode;
1562 	cmd->count = 1;
1563 
1564 	if (region->overlaid)
1565 		cmd->size = region->size;
1566 	else
1567 		cmd->size = erase->size;
1568 
1569 	return cmd;
1570 }
1571 
1572 /**
1573  * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1574  * @erase_list:	list of erase commands
1575  */
spi_nor_destroy_erase_cmd_list(struct list_head * erase_list)1576 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1577 {
1578 	struct spi_nor_erase_command *cmd, *next;
1579 
1580 	list_for_each_entry_safe(cmd, next, erase_list, list) {
1581 		list_del(&cmd->list);
1582 		kfree(cmd);
1583 	}
1584 }
1585 
/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	unsigned int i;
	int ret = -EINVAL;

	for (i = 0; i < map->n_regions && len; i++) {
		region = &map->regions[i];
		region_end = region->offset + region->size;

		while (len && addr >= region->offset && addr < region_end) {
			erase = spi_nor_find_best_erase_type(map, region, addr,
							     len);
			/* No erase type fits: the requested range is invalid. */
			if (!erase)
				goto destroy_erase_cmd_list;

			/*
			 * On the first pass prev_erase is NULL, so the first
			 * test short-circuits before cmd (still NULL) is
			 * dereferenced.
			 */
			if (prev_erase != erase || erase->size != cmd->size ||
			    region->overlaid) {
				cmd = spi_nor_init_erase_cmd(region, erase);
				if (IS_ERR(cmd)) {
					ret = PTR_ERR(cmd);
					goto destroy_erase_cmd_list;
				}

				list_add_tail(&cmd->list, erase_list);
			} else {
				/* Same erase type again: coalesce. */
				cmd->count++;
			}

			len -= cmd->size;
			addr += cmd->size;
			prev_erase = erase;
		}
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1646 
/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		/* One command may repeat the same opcode cmd->count times. */
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			ret = spi_nor_lock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto destroy_erase_cmd_list;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		/* Command fully executed: release it. */
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1706 
/*
 * spi_nor_erase_dice() - erase @len bytes, one die at a time
 * @nor:	pointer to 'struct spi_nor'
 * @addr:	start address; caller guarantees die alignment
 * @len:	total number of bytes to erase, a multiple of @die_size
 * @die_size:	size of one die (the whole chip for single-die parts)
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
			      size_t len, size_t die_size)
{
	unsigned long timeout;
	int ret;

	/*
	 * Scale the timeout linearly with the size of the flash, with
	 * a minimum calibrated to an old 2MB flash. We could try to
	 * pull these from CFI/SFDP, but these values should be good
	 * enough for now.
	 */
	timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		      (unsigned long)(nor->mtd.size / SZ_2M));

	do {
		ret = spi_nor_lock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			return ret;
		}

		ret = spi_nor_erase_die(nor, addr, die_size);

		spi_nor_unlock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			return ret;

		addr += die_size;
		len -= die_size;

	} while (len);

	return 0;
}
1751 
/*
 * Erase an address range on the nor chip.  The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u8 n_dice = nor->params->n_dice;
	bool multi_die_erase = false;
	u32 addr, len, rem;
	size_t die_size;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
			(long long)instr->len);

	/* With uniform erase, the length must be sector-aligned. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	if (n_dice) {
		die_size = div_u64(mtd->size, n_dice);
		/* Whole dice covered: use the faster die erase path. */
		if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
			multi_die_erase = true;
	} else {
		die_size = mtd->size;
	}

	ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
	if (ret)
		return ret;

	/* chip (die) erase? */
	if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
	    multi_die_erase) {
		ret = spi_nor_erase_dice(nor, addr, len, die_size);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_lock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto erase_err;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);

	return ret;
}
1841 
1842 /**
1843  * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1844  * Register 1.
1845  * @nor:	pointer to a 'struct spi_nor'
1846  *
1847  * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
1848  *
1849  * Return: 0 on success, -errno otherwise.
1850  */
spi_nor_sr1_bit6_quad_enable(struct spi_nor * nor)1851 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1852 {
1853 	int ret;
1854 
1855 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
1856 	if (ret)
1857 		return ret;
1858 
1859 	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1860 		return 0;
1861 
1862 	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1863 
1864 	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1865 }
1866 
/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:       pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	/* If CR can't be read, write the QE bit blindly and verify the write. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Nothing to do when the QE bit is already set. */
	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
1894 
/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	/* sr2 is the bounce buffer, clobbered by the read back below. */
	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}
1941 
/* Manufacturers probed in order when matching a JEDEC ID. */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xmc,
};
1958 
/* Fallback entry for flashes described only by their SFDP tables. */
static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};
1962 
spi_nor_match_id(struct spi_nor * nor,const u8 * id)1963 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
1964 						 const u8 *id)
1965 {
1966 	const struct flash_info *part;
1967 	unsigned int i, j;
1968 
1969 	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
1970 		for (j = 0; j < manufacturers[i]->nparts; j++) {
1971 			part = &manufacturers[i]->parts[j];
1972 			if (part->id &&
1973 			    !memcmp(part->id->bytes, id, part->id->len)) {
1974 				nor->manufacturer = manufacturers[i];
1975 				return part;
1976 			}
1977 		}
1978 	}
1979 
1980 	return NULL;
1981 }
1982 
/*
 * Read the JEDEC ID and resolve it to a flash_info entry, falling back to a
 * generic SFDP-described flash when the ID is unknown but SFDP is present.
 */
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	int ret;

	ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Cache the complete flash ID. */
	nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
	if (!nor->id)
		return ERR_PTR(-ENOMEM);

	info = spi_nor_match_id(nor, id);

	/* Fallback to a generic flash described only by its SFDP data. */
	if (!info) {
		ret = spi_nor_check_sfdp_signature(nor);
		if (!ret)
			info = &spi_nor_generic_flash;
	}

	if (!info) {
		dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
			SPI_NOR_MAX_ID_LEN, id);
		return ERR_PTR(-ENODEV);
	}
	return info;
}
2016 
/* mtd ->_read() hook: read @len bytes at @from into @buf, possibly in chunks. */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	/* Keep the original range for unlocking; from/len advance below. */
	loff_t from_lock = from;
	size_t len_lock = len;
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
	if (ret)
		return ret;

	/* spi_nor_read_data() may return fewer bytes than requested: loop. */
	while (len) {
		loff_t addr = from;

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);

	return ret;
}
2056 
/*
 * Write an address range to the nor chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_prep_and_lock_pe(nor, to, len);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;
		/* Offset within the page; assumes page_size is a power of 2. */
		size_t page_offset = addr & (page_size - 1);
		/* the size of data remaining on the first page */
		size_t page_remain = min_t(size_t, page_size - page_offset, len - i);

		ret = spi_nor_lock_device(nor);
		if (ret)
			goto write_err;

		/* Write Enable must precede every Page Program cycle. */
		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			goto write_err;
		}

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		spi_nor_unlock_device(nor);
		if (ret < 0)
			goto write_err;
		/* spi_nor_write_data() may write less than page_remain. */
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep_pe(nor, to, len);

	return ret;
}
2111 
spi_nor_check(struct spi_nor * nor)2112 static int spi_nor_check(struct spi_nor *nor)
2113 {
2114 	if (!nor->dev ||
2115 	    (!nor->spimem && !nor->controller_ops) ||
2116 	    (!nor->spimem && nor->controller_ops &&
2117 	    (!nor->controller_ops->read ||
2118 	     !nor->controller_ops->write ||
2119 	     !nor->controller_ops->read_reg ||
2120 	     !nor->controller_ops->write_reg))) {
2121 		pr_err("spi-nor: please fill all the necessary fields!\n");
2122 		return -EINVAL;
2123 	}
2124 
2125 	if (nor->spimem && nor->controller_ops) {
2126 		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2127 		return -EINVAL;
2128 	}
2129 
2130 	return 0;
2131 }
2132 
/**
 * spi_nor_set_read_settings() - Initialize a (Fast) Read command description.
 * @read:		pointer to a 'struct spi_nor_read_command'
 * @num_mode_clocks:	number of mode clock cycles
 * @num_wait_states:	number of wait state clock cycles (dummy cycles)
 * @opcode:		the read op code
 * @proto:		the SPI protocol used by this read command
 */
void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}
2145 
/**
 * spi_nor_set_pp_settings() - Initialize a Page Program command description.
 * @pp:		pointer to a 'struct spi_nor_pp_command'
 * @opcode:	the page program op code
 * @proto:	the SPI protocol used by this page program command
 */
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}
2152 
/*
 * Map a single SNOR_HWCAPS_* capability bit to its command index using the
 * given { capability, command } lookup table.
 *
 * Returns the command index on success, -EINVAL if the capability is not
 * listed in the table.
 */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	const int (*entry)[2] = table;
	const int (*end)[2] = table + size;

	for (; entry < end; entry++) {
		if ((*entry)[0] == (int)hwcaps)
			return (*entry)[1];
	}

	return -EINVAL;
}
2163 
/**
 * spi_nor_hwcaps_read2cmd() - Convert a read hardware capability bit to its
 * index in the params->reads[] array.
 * @hwcaps:	a SNOR_HWCAPS_READ* capability with exactly one bit set
 *
 * Return: a SNOR_CMD_READ* index on success, -EINVAL otherwise.
 */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	/* One-to-one mapping between read capabilities and command indexes. */
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2188 
/**
 * spi_nor_hwcaps_pp2cmd() - Convert a page program hardware capability bit to
 * its index in the params->page_programs[] array.
 * @hwcaps:	a SNOR_HWCAPS_PP* capability with exactly one bit set
 *
 * Return: a SNOR_CMD_PP* index on success, -EINVAL otherwise.
 */
int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	/* One-to-one mapping between page program capabilities and indexes. */
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2205 
/**
 * spi_nor_spimem_check_op() - check if the operation is supported by the
 *                             controller
 * @nor:        pointer to a 'struct spi_nor'
 * @op:         pointer to op template to be checked; op->addr.nbytes is
 *              overwritten as a side effect
 *
 * Return: 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->params->size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}
2236 
/**
 * spi_nor_spimem_check_readop() - check if the read op is supported by the
 *                                 controller
 * @nor:         pointer to a 'struct spi_nor'
 * @read:        pointer to op template to be checked
 *
 * Return: 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;
	/*
	 * NOTE(review): the DTR doubling below keys off nor->read_proto (the
	 * currently selected protocol) while the op was set up from the
	 * candidate read->proto — confirm this asymmetry is intentional.
	 */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
2260 
/**
 * spi_nor_spimem_check_pp() - check if the page program op is supported by
 *                             the controller
 * @nor:         pointer to a 'struct spi_nor'
 * @pp:          pointer to op template to be checked
 *
 * Return: 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
2278 
/**
 * spi_nor_spimem_adjust_hwcaps() - Find optimal Read/Write protocol
 *                                  based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	/* Drop every remaining capability the controller cannot execute. */
	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		/* A bit maps to a read cap or a page program cap, not both. */
		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
2322 
2323 /**
2324  * spi_nor_set_erase_type() - set a SPI NOR erase type
2325  * @erase:	pointer to a structure that describes a SPI NOR erase type
2326  * @size:	the size of the sector/block erased by the erase type
2327  * @opcode:	the SPI command op code to erase the sector/block
2328  */
spi_nor_set_erase_type(struct spi_nor_erase_type * erase,u32 size,u8 opcode)2329 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2330 			    u8 opcode)
2331 {
2332 	erase->size = size;
2333 	erase->opcode = opcode;
2334 	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2335 	erase->size_shift = ffs(erase->size) - 1;
2336 	erase->size_mask = (1 << erase->size_shift) - 1;
2337 }
2338 
/**
 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * A zero size marks the erase type as unusable; selection code skips
 * erase types whose size is zero.
 */
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
{
	erase->size = 0;
}
2347 
/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:		the erase map of the SPI NOR
 * @erase_mask:		bitmask encoding erase types that can erase the entire
 *			flash memory
 * @flash_size:		the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* A uniform map is a single region covering the whole flash. */
	map->uniform_region.offset = 0;
	map->uniform_region.size = flash_size;
	map->uniform_region.erase_mask = erase_mask;
	map->regions = &map->uniform_region;
	map->n_regions = 1;
}
2364 
/**
 * spi_nor_post_bfpt_fixups() - Run the post Basic Flash Parameter Table
 * fixup hooks.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the BFPT parameter header
 * @bfpt:		pointer to the parsed BFPT data
 *
 * The manufacturer-level hook runs first, then the flash-specific one.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
			     const struct sfdp_parameter_header *bfpt_header,
			     const struct sfdp_bfpt *bfpt)
{
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_bfpt) {
		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
							   bfpt);
		if (ret)
			return ret;
	}

	if (nor->info->fixups && nor->info->fixups->post_bfpt)
		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);

	return 0;
}
2384 
/**
 * spi_nor_select_read() - Select the (Fast) Read command.
 * @nor:		pointer to a 'struct spi_nor'
 * @shared_hwcaps:	capabilities supported by both the controller and the
 *			flash
 *
 * Picks the read command corresponding to the highest shared read capability
 * bit and records its opcode, protocol and dummy cycles in @nor.
 *
 * Return: 0 on success, -EINVAL if no shared read capability exists.
 */
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	/* fls() - 1 yields the highest shared read capability bit. */
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the SPI NOR framework, we don't need to make the difference
	 * between mode clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
2415 
/**
 * spi_nor_select_pp() - Select the Page Program command.
 * @nor:		pointer to a 'struct spi_nor'
 * @shared_hwcaps:	capabilities supported by both the controller and the
 *			flash
 *
 * Picks the page program command corresponding to the highest shared page
 * program capability bit and records its opcode and protocol in @nor.
 *
 * Return: 0 on success, -EINVAL if no shared page program capability exists.
 */
static int spi_nor_select_pp(struct spi_nor *nor,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}
2434 
2435 /**
2436  * spi_nor_select_uniform_erase() - select optimum uniform erase type
2437  * @map:		the erase map of the SPI NOR
2438  *
2439  * Once the optimum uniform sector erase command is found, disable all the
2440  * other.
2441  *
2442  * Return: pointer to erase type on success, NULL otherwise.
2443  */
2444 static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map * map)2445 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
2446 {
2447 	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2448 	int i;
2449 	u8 uniform_erase_type = map->uniform_region.erase_mask;
2450 
2451 	/*
2452 	 * Search for the biggest erase size, except for when compiled
2453 	 * to use 4k erases.
2454 	 */
2455 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2456 		if (!(uniform_erase_type & BIT(i)))
2457 			continue;
2458 
2459 		tested_erase = &map->erase_type[i];
2460 
2461 		/* Skip masked erase types. */
2462 		if (!tested_erase->size)
2463 			continue;
2464 
2465 		/*
2466 		 * If the current erase size is the 4k one, stop here,
2467 		 * we have found the right uniform Sector Erase command.
2468 		 */
2469 		if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
2470 		    tested_erase->size == SZ_4K) {
2471 			erase = tested_erase;
2472 			break;
2473 		}
2474 
2475 		/*
2476 		 * Otherwise, the current erase size is still a valid candidate.
2477 		 * Select the biggest valid candidate.
2478 		 */
2479 		if (!erase && tested_erase->size)
2480 			erase = tested_erase;
2481 			/* keep iterating to find the wanted_size */
2482 	}
2483 
2484 	if (!erase)
2485 		return NULL;
2486 
2487 	/* Disable all other Sector Erase commands. */
2488 	map->uniform_region.erase_mask = BIT(erase - map->erase_type);
2489 	return erase;
2490 }
2491 
/**
 * spi_nor_select_erase() - Select the Sector Erase settings.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Return: 0 on success, -EINVAL if no usable erase type is available.
 */
static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has an uniform layout then used only one
	 * of the supported erase sizes for all Sector Erase commands.
	 * So to be backward compatible, the new implementation also tries to
	 * manage the SPI flash memory as uniform with a single erase sector
	 * size, when possible.
	 */
	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
2533 
/**
 * spi_nor_set_addr_nbytes() - Set the number of address bytes and, when
 * applicable, the 4-byte opcodes.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Precedence: params->addr_nbytes, then the 8D-8D-8D constraint, then
 * info->addr_nbytes, then the 3-byte default (promoted to 4 for flashes
 * larger than 16MiB).
 *
 * Return: 0 on success, -EINVAL if the number of address bytes exceeds
 * SPI_NOR_MAX_ADDR_NBYTES.
 */
static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
	if (nor->params->addr_nbytes) {
		nor->addr_nbytes = nor->params->addr_nbytes;
	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
		/*
		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
		 * in this protocol an odd addr_nbytes cannot be used because
		 * then the address phase would only span a cycle and a half.
		 * Half a cycle would be left over. We would then have to start
		 * the dummy phase in the middle of a cycle and so too the data
		 * phase, and we will end the transaction with half a cycle left
		 * over.
		 *
		 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
		 * avoid this situation.
		 */
		nor->addr_nbytes = 4;
	} else if (nor->info->addr_nbytes) {
		nor->addr_nbytes = nor->info->addr_nbytes;
	} else {
		nor->addr_nbytes = 3;
	}

	if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_nbytes = 4;
	}

	if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
		dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
			nor->addr_nbytes);
		return -EINVAL;
	}

	/* Set 4byte opcodes when possible. */
	if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	return 0;
}
2576 
/**
 * spi_nor_setup() - Select the read, page program and erase settings based on
 * the capabilities shared by the SPI controller and the flash.
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	the hardware capabilities supported by the SPI controller
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return spi_nor_set_addr_nbytes(nor);
}
2637 
/**
 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
 * settings based on MFR register and ->default_init() hook.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
	/* Manufacturer-wide hook first, flash-specific hook may override. */
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}
2652 
/**
 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
 * settings based on nor->info->sfdp_flags. This method should be called only by
 * flashes that do not define SFDP tables. If the flash supports SFDP but the
 * information is wrong and the settings from this function can not be retrieved
 * by parsing SFDP, one should instead use the fixup hooks and update the wrong
 * bits.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	const u8 no_sfdp_flags = info->no_sfdp_flags;
	u8 i, erase_mask;

	if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (no_sfdp_flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	/* The (default) sector erase type is always available. */
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i],
			       info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
2726 
/**
 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
 * @nor:	pointer to a 'struct spi_nor'
 */
static void spi_nor_init_flags(struct spi_nor *nor)
{
	struct device_node *np = spi_nor_get_flash_node(nor);
	const u16 flags = nor->info->flags;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	if (of_property_read_bool(np, "no-wp"))
		nor->flags |= SNOR_F_NO_WP;

	if (flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	if (flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	/* RWW requires multiple banks and is only enabled for spimem users. */
	if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
	    !nor->controller_ops)
		nor->flags |= SNOR_F_RWW;
}
2765 
/**
 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
 * be discovered by SFDP for this particular flash because the SFDP table that
 * indicates this support is not defined in the flash. In case the table for
 * this support is defined but has wrong values, one should instead use a
 * post_sfdp() hook to set the SNOR_F equivalent flag.
 * @nor:       pointer to a 'struct spi_nor'
 */
static void spi_nor_init_fixup_flags(struct spi_nor *nor)
{
	const u8 fixup_flags = nor->info->fixup_flags;

	if (fixup_flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}
2784 
/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to initialize flash parameters that are not declared in the JESD216
 * SFDP standard, or where SFDP tables are not defined at all.
 * Will replace the spi_nor_manufacturer_init_params() method.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_late_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->late_init) {
		ret = nor->manufacturer->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Needed by some flashes late_init hooks. */
	spi_nor_init_flags(nor);

	if (nor->info->fixups && nor->info->fixups->late_init) {
		ret = nor->info->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Default die erase to a full chip erase when no opcode was set. */
	if (!nor->params->die_erase_opcode)
		nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;

	/* Default method kept for backward compatibility. */
	if (!params->set_4byte_addr_mode)
		params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;

	spi_nor_init_fixup_flags(nor);

	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);

	if (params->n_banks > 1)
		params->bank_size = div_u64(params->size, params->n_banks);

	return 0;
}
2835 
/**
 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
 * parameters and settings based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
{
	/* Snapshot the legacy parameters so they can be restored on failure. */
	struct spi_nor_flash_parameter sfdp_params;

	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor)) {
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		/* 4B opcodes may have been set while parsing; undo that too. */
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}
2855 
/**
 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
 * parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method assumes that flash doesn't support SFDP so it initializes flash
 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
 * when parsing SFDP, if supported.
 */
static void spi_nor_init_params_deprecated(struct spi_nor *nor)
{
	spi_nor_no_sfdp_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	/* Only try SFDP when a multi-I/O read mode was advertised. */
	if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
					SPI_NOR_QUAD_READ |
					SPI_NOR_OCTAL_READ |
					SPI_NOR_OCTAL_DTR_READ))
		spi_nor_sfdp_init_params_deprecated(nor);
}
2877 
/**
 * spi_nor_init_default_params() - Default initialization of flash parameters
 * and settings. Done for all flashes, regardless is they define SFDP tables
 * or not.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_init_default_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);

	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->otp.org = info->otp;

	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = info->size;
	params->bank_size = params->size;
	params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
	params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;

	/* Default to Fast Read for non-DT and enable it if requested by DT. */
	if (!np || of_property_read_bool(np, "m25p,fast-read"))
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);
	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_QUAD_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
	}
}
2928 
2929 /**
2930  * spi_nor_init_params() - Initialize the flash's parameters and settings.
2931  * @nor:	pointer to a 'struct spi_nor'.
2932  *
2933  * The flash parameters and settings are initialized based on a sequence of
2934  * calls that are ordered by priority:
2935  *
2936  * 1/ Default flash parameters initialization. The initializations are done
2937  *    based on nor->info data:
2938  *		spi_nor_info_init_params()
2939  *
2940  * which can be overwritten by:
2941  * 2/ Manufacturer flash parameters initialization. The initializations are
2942  *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweaks, ->default_init():
2944  *		spi_nor_manufacturer_init_params()
2945  *
2946  * which can be overwritten by:
2947  * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above.
2949  *		spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
2950  *
2951  *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
2952  *    the flash parameters and settings immediately after parsing the Basic
2953  *    Flash Parameter Table.
2954  *    spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
2955  *    It is used to tweak various flash parameters when information provided
2956  *    by the SFDP tables are wrong.
2957  *
2958  * which can be overwritten by:
2959  * 4/ Late flash parameters initialization, used to initialize flash
2960  * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
2961  * tables are not defined at all.
2962  *		spi_nor_late_init_params()
2963  *
2964  * Return: 0 on success, -errno otherwise.
2965  */
static int spi_nor_init_params(struct spi_nor *nor)
{
	int ret;

	/* Device-managed allocation, freed automatically on detach. */
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	/* 1/ Defaults common to all flashes, derived from nor->info. */
	spi_nor_init_default_params(nor);

	if (spi_nor_needs_sfdp(nor)) {
		/*
		 * 3/ The flash declares SFDP-discoverable protocols, so a
		 * parse failure here is fatal instead of falling back.
		 */
		ret = spi_nor_parse_sfdp(nor);
		if (ret) {
			dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
			return ret;
		}
	} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
		/* Flash explicitly opts out of SFDP: use flash_info data. */
		spi_nor_no_sfdp_init_params(nor);
	} else {
		/* Legacy path: attempt SFDP but tolerate its absence. */
		spi_nor_init_params_deprecated(nor);
	}

	/* 4/ Settings that SFDP does not (or cannot) describe. */
	ret = spi_nor_late_init_params(nor);
	if (ret)
		return ret;

	/* Page-aligned write logic relies on power-of-two page sizes. */
	if (WARN_ON(!is_power_of_2(nor->params->page_size)))
		return -EINVAL;

	return 0;
}
2997 
/**
 * spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
2999  * @nor:                 pointer to a 'struct spi_nor'
3000  * @enable:              whether to enable or disable Octal DTR
3001  *
3002  * Return: 0 on success, -errno otherwise.
3003  */
static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
	int ret;

	/* Nothing to do unless the flash provides a hook for it. */
	if (!nor->params->set_octal_dtr)
		return 0;

	/* Only switch when both data paths run in 8D-8D-8D. */
	if (nor->read_proto != SNOR_PROTO_8_8_8_DTR ||
	    nor->write_proto != SNOR_PROTO_8_8_8_DTR)
		return 0;

	/* Only volatile I/O mode switching is handled here. */
	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->set_octal_dtr(nor, enable);
	if (ret)
		return ret;

	/* Register accesses now follow the newly selected protocol. */
	nor->reg_proto = enable ? SNOR_PROTO_8_8_8_DTR : SNOR_PROTO_1_1_1;

	return 0;
}
3029 
3030 /**
3031  * spi_nor_quad_enable() - enable Quad I/O if needed.
3032  * @nor:                pointer to a 'struct spi_nor'
3033  *
3034  * Return: 0 on success, -errno otherwise.
3035  */
spi_nor_quad_enable(struct spi_nor * nor)3036 static int spi_nor_quad_enable(struct spi_nor *nor)
3037 {
3038 	if (!nor->params->quad_enable)
3039 		return 0;
3040 
3041 	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3042 	      spi_nor_get_protocol_width(nor->write_proto) == 4))
3043 		return 0;
3044 
3045 	return nor->params->quad_enable(nor);
3046 }
3047 
3048 /**
3049  * spi_nor_set_4byte_addr_mode() - Set address mode.
3050  * @nor:                pointer to a 'struct spi_nor'.
3051  * @enable:             enable/disable 4 byte address mode.
3052  *
3053  * Return: 0 on success, -errno otherwise.
3054  */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u8 nbytes = enable ? 4 : 3;
	int ret;

	/*
	 * If the RESET# pin isn't hooked up properly, or the system
	 * otherwise doesn't perform a reset command in the boot
	 * sequence, it's impossible to 100% protect against unexpected
	 * reboots (e.g., crashes). Warn the user (or hopefully, system
	 * designer) that this is bad.
	 */
	if (enable)
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");

	/* -EOPNOTSUPP means the mode is implied rather than switchable. */
	ret = params->set_4byte_addr_mode(nor, enable);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	params->addr_nbytes = nbytes;
	params->addr_mode_nbytes = nbytes;

	return 0;
}
3086 
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	/* Enter 8D-8D-8D mode first, when both I/O paths selected it. */
	err = spi_nor_set_octal_dtr(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	/* Set the Quad Enable bit when a x4 protocol was selected. */
	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	/*
	 * A 4-byte-addressed flash without dedicated 4-byte opcodes must be
	 * switched into 4-byte address mode explicitly; the 8D-8D-8D path
	 * skips this switch.
	 */
	if (nor->addr_nbytes == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES))
		return spi_nor_set_4byte_addr_mode(nor, true);

	return 0;
}
3125 
3126 /**
3127  * spi_nor_soft_reset() - Perform a software reset
3128  * @nor:	pointer to 'struct spi_nor'
3129  *
3130  * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
3131  * the device to its power-on-reset state. This is useful when the software has
3132  * made some changes to device (volatile) registers and needs to reset it before
3133  * shutting down, for example.
3134  *
3135  * Not every flash supports this sequence. The same set of opcodes might be used
3136  * for some other operation on a flash that does not support this. Support for
 * this sequence can be discovered via SFDP in the BFPT table.
3140  */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	/* Reset Enable must be issued before the actual Reset command. */
	op = (struct spi_mem_op)SPINOR_SRSTEN_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		/* -EOPNOTSUPP: the controller can't do it; stay silent. */
		if (ret != -EOPNOTSUPP)
			dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	op = (struct spi_mem_op)SPINOR_SRST_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}
3174 
3175 /* mtd suspend handler */
spi_nor_suspend(struct mtd_info * mtd)3176 static int spi_nor_suspend(struct mtd_info *mtd)
3177 {
3178 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
3179 	int ret;
3180 
3181 	/* Disable octal DTR mode if we enabled it. */
3182 	ret = spi_nor_set_octal_dtr(nor, false);
3183 	if (ret)
3184 		dev_err(nor->dev, "suspend() failed\n");
3185 
3186 	return ret;
3187 }
3188 
3189 /* mtd resume handler */
spi_nor_resume(struct mtd_info * mtd)3190 static void spi_nor_resume(struct mtd_info *mtd)
3191 {
3192 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
3193 	struct device *dev = nor->dev;
3194 	int ret;
3195 
3196 	/* re-initialize the nor chip */
3197 	ret = spi_nor_init(nor);
3198 	if (ret)
3199 		dev_err(dev, "resume() failed\n");
3200 }
3201 
spi_nor_get_device(struct mtd_info * mtd)3202 static int spi_nor_get_device(struct mtd_info *mtd)
3203 {
3204 	struct mtd_info *master = mtd_get_master(mtd);
3205 	struct spi_nor *nor = mtd_to_spi_nor(master);
3206 	struct device *dev;
3207 
3208 	if (nor->spimem)
3209 		dev = nor->spimem->spi->controller->dev.parent;
3210 	else
3211 		dev = nor->dev;
3212 
3213 	if (!try_module_get(dev->driver->owner))
3214 		return -ENODEV;
3215 
3216 	return 0;
3217 }
3218 
spi_nor_put_device(struct mtd_info * mtd)3219 static void spi_nor_put_device(struct mtd_info *mtd)
3220 {
3221 	struct mtd_info *master = mtd_get_master(mtd);
3222 	struct spi_nor *nor = mtd_to_spi_nor(master);
3223 	struct device *dev;
3224 
3225 	if (nor->spimem)
3226 		dev = nor->spimem->spi->controller->dev.parent;
3227 	else
3228 		dev = nor->dev;
3229 
3230 	module_put(dev->driver->owner);
3231 }
3232 
static void spi_nor_restore(struct spi_nor *nor)
{
	int ret;

	/*
	 * Restore the addressing mode. Only needed when 4-byte opcodes are
	 * not in use and the flash is flagged as not reverting to 3-byte
	 * mode on reset by itself (SNOR_F_BROKEN_RESET).
	 */
	if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET) {
		ret = spi_nor_set_4byte_addr_mode(nor, false);
		if (ret)
			/*
			 * Do not stop the execution in the hope that the flash
			 * will default to the 3-byte address mode after the
			 * software reset.
			 */
			dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
	}

	/* Soft reset only when the flash advertised support for it. */
	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
3253 
spi_nor_match_name(struct spi_nor * nor,const char * name)3254 static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
3255 						   const char *name)
3256 {
3257 	unsigned int i, j;
3258 
3259 	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
3260 		for (j = 0; j < manufacturers[i]->nparts; j++) {
3261 			if (manufacturers[i]->parts[j].name &&
3262 			    !strcmp(name, manufacturers[i]->parts[j].name)) {
3263 				nor->manufacturer = manufacturers[i];
3264 				return &manufacturers[i]->parts[j];
3265 			}
3266 		}
3267 	}
3268 
3269 	return NULL;
3270 }
3271 
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	/* First try an exact match on the driver-provided name, if any. */
	if (name)
		info = spi_nor_match_name(nor, name);
	/*
	 * Auto-detect if chip name wasn't specified or not found, or the chip
	 * has an ID. If the chip supposedly has an ID, we also do an
	 * auto-detection to compare it later.
	 */
	if (!info || info->id) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_detect(nor);
		if (IS_ERR(jinfo))
			return jinfo;

		/*
		 * If caller has specified name of flash model that can normally
		 * be detected using JEDEC, let's verify it.
		 */
		if (info && jinfo != info)
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);

		/* If info was set before, JEDEC knows better. */
		info = jinfo;
	}

	/* Either the name match or the JEDEC-detected entry; never both. */
	return info;
}
3305 
3306 static u32
spi_nor_get_region_erasesize(const struct spi_nor_erase_region * region,const struct spi_nor_erase_type * erase_type)3307 spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
3308 			     const struct spi_nor_erase_type *erase_type)
3309 {
3310 	int i;
3311 
3312 	if (region->overlaid)
3313 		return region->size;
3314 
3315 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3316 		if (region->erase_mask & BIT(i))
3317 			return erase_type[i].size;
3318 	}
3319 
3320 	return 0;
3321 }
3322 
spi_nor_set_mtd_eraseregions(struct spi_nor * nor)3323 static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
3324 {
3325 	const struct spi_nor_erase_map *map = &nor->params->erase_map;
3326 	const struct spi_nor_erase_region *region = map->regions;
3327 	struct mtd_erase_region_info *mtd_region;
3328 	struct mtd_info *mtd = &nor->mtd;
3329 	u32 erasesize, i;
3330 
3331 	mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
3332 				  GFP_KERNEL);
3333 	if (!mtd_region)
3334 		return -ENOMEM;
3335 
3336 	for (i = 0; i < map->n_regions; i++) {
3337 		erasesize = spi_nor_get_region_erasesize(&region[i],
3338 							 map->erase_type);
3339 		if (!erasesize)
3340 			return -EINVAL;
3341 
3342 		mtd_region[i].erasesize = erasesize;
3343 		mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
3344 		mtd_region[i].offset = region[i].offset;
3345 	}
3346 
3347 	mtd->numeraseregions = map->n_regions;
3348 	mtd->eraseregions = mtd_region;
3349 
3350 	return 0;
3351 }
3352 
static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
	struct mtd_info *mtd = &nor->mtd;
	struct device *dev = nor->dev;

	/* Hook up the optional locking and OTP callbacks first. */
	spi_nor_set_mtd_locking_ops(nor);
	spi_nor_set_mtd_otp_ops(nor);

	mtd->dev.parent = dev;
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
	if (nor->flags & SNOR_F_ECC)
		mtd->flags &= ~MTD_BIT_WRITEABLE;
	if (nor->info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;
	else
		mtd->_erase = spi_nor_erase;
	mtd->writesize = nor->params->writesize;
	mtd->writebufsize = nor->params->page_size;
	mtd->size = nor->params->size;
	mtd->_read = spi_nor_read;
	/* Might be already set by some SST flashes. */
	if (!mtd->_write)
		mtd->_write = spi_nor_write;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	/* Non-uniform erase maps need per-region mtd erase regions. */
	if (!spi_nor_has_uniform_erase(nor))
		return spi_nor_set_mtd_eraseregions(nor);

	return 0;
}
3390 
static int spi_nor_hw_reset(struct spi_nor *nor)
{
	struct gpio_desc *reset;

	/* The "reset" GPIO is optional; bail out quietly when absent. */
	reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(reset))
		return PTR_ERR_OR_ZERO(reset);

	/*
	 * Experimental delay values by looking at different flash device
	 * vendors datasheets.
	 */
	usleep_range(1, 5);
	/* Assert the reset line... */
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(100, 150);
	/* ...then release it and give the flash time to come back up. */
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(1000, 1200);

	return 0;
}
3411 
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	int ret;

	/* Validate the nor object before touching the hardware. */
	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	/* Optional hardware reset via a "reset" GPIO, when wired up. */
	ret = spi_nor_hw_reset(nor);
	if (ret)
		return ret;

	/* Identify the flash by driver-provided name and/or JEDEC ID. */
	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	mutex_init(&nor->lock);

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	/* Wait queue used by the parallel read-while-write locking. */
	if (spi_nor_use_parallel_locking(nor))
		init_waitqueue_head(&nor->rww.wait);

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the number of address bytes.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* No mtd_info fields should be used up to this point. */
	ret = spi_nor_set_mtd_info(nor);
	if (ret)
		return ret;

	dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
		SPI_NOR_MAX_ID_LEN, nor->id);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3489 
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	/* Template op mirroring the read opcode/protocol chosen by setup. */
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	/* DTR transfers twice as many bytes per clock cycle. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	/* Descriptor is devm-managed; stored for later read fast paths. */
	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}
3520 
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	/* Template op mirroring the program opcode/protocol from setup. */
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* SST AAI continuation writes (sst_write_second) omit the address. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	/* Descriptor is devm-managed; stored for later write fast paths. */
	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}
3549 
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct device *dev = &spi->dev;
	struct flash_platform_data *data = dev_get_platdata(dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	/* Power the flash; the regulator is device-managed. */
	ret = devm_regulator_get_enable(dev, "vcc");
	if (ret)
		return ret;

	nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = dev;
	spi_nor_set_flash_node(nor, dev->of_node);

	spi_mem_set_drvdata(spimem, nor);

	/* Platform data name wins, then the spi-mem-derived name. */
	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	spi_nor_debugfs_register(nor);

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->params->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->params->page_size;
		devm_kfree(dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	/* Pre-create direct-mapping descriptors for reads and writes. */
	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
3628 
spi_nor_remove(struct spi_mem * spimem)3629 static int spi_nor_remove(struct spi_mem *spimem)
3630 {
3631 	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3632 
3633 	spi_nor_restore(nor);
3634 
3635 	/* Clean up MTD stuff. */
3636 	return mtd_device_unregister(&nor->mtd);
3637 }
3638 
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	/* Same restore as spi_nor_remove(): leave the flash rediscoverable. */
	spi_nor_restore(spi_mem_get_drvdata(spimem));
}
3645 
3646 /*
3647  * Do NOT add to this array without reading the following:
3648  *
3649  * Historically, many flash devices are bound to this driver by their name. But
3650  * since most of these flash are compatible to some extent, and their
3651  * differences can often be differentiated by the JEDEC read-ID command, we
3652  * encourage new users to add support to the spi-nor library, and simply bind
3653  * against a generic string here (e.g., "jedec,spi-nor").
3654  *
3655  * Many flash names are kept here in this list to keep them available
3656  * as module aliases for existing platforms.
3657  */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3702 
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 *
	 * Deliberately the only entry here; see the warning above
	 * spi_nor_dev_ids before adding per-chip compatibles.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3712 
3713 /*
3714  * REVISIT: many of these chips have deep power-down modes, which
3715  * should clearly be entered on suspend() to minimize power use.
3716  * And also when they're otherwise idle...
3717  */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
			.dev_groups = spi_nor_sysfs_groups,
		},
		/* Legacy name-based matching, see spi_nor_dev_ids above. */
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	/* Restore the flash state on reboot/poweroff as well. */
	.shutdown = spi_nor_shutdown,
};
3731 
static int __init spi_nor_module_init(void)
{
	/* Register with the spi-mem core; probe runs per bound device. */
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);
3737 
static void __exit spi_nor_module_exit(void)
{
	spi_mem_driver_unregister(&spi_nor_driver);
	/* Tear down the driver-wide debugfs state. */
	spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);
3744 
/* Module metadata exposed through modinfo. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");
3749