// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 * Copyright 2024 BayLibre, SAS
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <trace/events/spi.h>

#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH	0x10
#define SPI_ENGINE_REG_RESET			0x40

#define SPI_ENGINE_REG_INT_ENABLE		0x80
#define SPI_ENGINE_REG_INT_PENDING		0x84
#define SPI_ENGINE_REG_INT_SOURCE		0x88

#define SPI_ENGINE_REG_SYNC_ID			0xc0
#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID		0xc4

#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8

#define SPI_ENGINE_REG_CMD_FIFO			0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec

#define SPI_ENGINE_MAX_NUM_OFFLOADS		32

#define SPI_ENGINE_REG_OFFLOAD_CTRL(x)		(0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_STATUS(x)	(0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_RESET(x)		(0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x)	(0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x)	(0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))

#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO	GENMASK(15, 8)
#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD	GENMASK(7, 0)

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
#define SPI_ENGINE_INT_SYNC			BIT(3)
#define SPI_ENGINE_INT_OFFLOAD_SYNC		BIT(4)

#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE		BIT(0)

#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH		BIT(3)

#define SPI_ENGINE_INST_TRANSFER		0x0
#define SPI_ENGINE_INST_ASSERT			0x1
#define SPI_ENGINE_INST_WRITE			0x2
#define SPI_ENGINE_INST_MISC			0x3
#define SPI_ENGINE_INST_CS_INV			0x4

#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
#define SPI_ENGINE_CMD_REG_CONFIG		0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2

#define SPI_ENGINE_MISC_SYNC			0x0
#define SPI_ENGINE_MISC_SLEEP			0x1

#define SPI_ENGINE_TRANSFER_WRITE		0x1
#define SPI_ENGINE_TRANSFER_READ		0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1

#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
#define SPI_ENGINE_CMD_CS_INV(flags) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
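
/*
 * Instruction words are 16 bits: a 4-bit opcode in [15:12] and two argument
 * fields in [11:8] and [7:0]. Illustrative encoding:
 * SPI_ENGINE_CMD_TRANSFER(SPI_ENGINE_TRANSFER_WRITE, 7) yields
 * (0x0 << 12) | (0x1 << 8) | 7 = 0x0107, i.e. "write 8 words".
 */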

/* default sizes - can be changed when SPI Engine firmware is compiled */
#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE	16
#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE	16

struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};

enum {
	SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
	SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
};

struct spi_engine_offload {
	struct spi_engine *spi_engine;
	unsigned long flags;
	unsigned int offload_num;
};

struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	spinlock_t lock;

	void __iomem *base;
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;
	unsigned int int_enable;
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;

	unsigned int offload_ctrl_mem_size;
	unsigned int offload_sdo_mem_size;
	struct spi_offload *offload;
	u32 offload_caps;
};

static void spi_engine_program_add_cmd(struct spi_engine_program *p,
	bool dry, uint16_t cmd)
{
	p->length++;

	if (!dry)
		p->instructions[p->length - 1] = cmd;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
	unsigned int config = 0;

	if (spi->mode & SPI_CPOL)
		config |= SPI_ENGINE_CONFIG_CPOL;
	if (spi->mode & SPI_CPHA)
		config |= SPI_ENGINE_CONFIG_CPHA;
	if (spi->mode & SPI_3WIRE)
		config |= SPI_ENGINE_CONFIG_3WIRE;
	if (spi->mode & SPI_MOSI_IDLE_HIGH)
		config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
	if (spi->mode & SPI_MOSI_IDLE_LOW)
		config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;

	return config;
}

static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
	struct spi_transfer *xfer)
{
	unsigned int len;

	if (xfer->bits_per_word <= 8)
		len = xfer->len;
	else if (xfer->bits_per_word <= 16)
		len = xfer->len / 2;
	else
		len = xfer->len / 4;

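	/*
	 * The TRANSFER length field holds (number of words - 1) in 8 bits, so
	 * a transfer is emitted in chunks of at most 256 words. For example,
	 * a 700-byte, 8-bits-per-word transfer becomes three instructions
	 * covering 256 + 256 + 188 words.
	 */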
	while (len) {
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

		if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
			flags |= SPI_ENGINE_TRANSFER_WRITE;
		if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
		len -= n;
	}
}

static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * A negative delay indicates an error, e.g. from spi_delay_to_ns(). And
	 * if the delay is less than the instruction execution time, there is no
	 * need for an extra sleep instruction since the instruction execution
	 * time will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
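	/*
	 * Illustrative arithmetic: with sclk_hz = 100 MHz and a net delay of
	 * 500 ns, t = DIV_ROUND_UP(500 * 100000000, 1000000000) = 50 clock
	 * cycles, emitted below as a single SLEEP instruction with argument 49.
	 */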
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}

static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask = 0xff;

	if (assert)
		mask ^= BIT(spi_get_chipselect(spi, 0));

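	/*
	 * The mask is active low at this level: asserting chip select 0
	 * clears bit 0, giving 0xfe, while 0xff deasserts all eight lines.
	 * SPI_CS_HIGH is handled separately via the CS_INV flags in
	 * spi_engine_setup().
	 */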
	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining not
 * done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* If we have an offload transfer, we can't rx to buffer */
		if (msg->offload && xfer->rx_buf)
			return -EINVAL;

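		/*
		 * Worked example (illustrative): with max_hz = 50 MHz and a
		 * requested xfer->speed_hz of 4 MHz, clk_div = 13, so the
		 * reported effective speed is 50000000 / 13 = 3846153 Hz.
		 */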
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}

	return 0;
}

static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}

static void spi_engine_xfer_next(struct spi_message *msg,
	struct spi_transfer **_xfer)
{
	struct spi_transfer *xfer = *_xfer;

	if (!xfer) {
		xfer = list_first_entry(&msg->transfers,
			struct spi_transfer, transfer_list);
	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
		xfer = NULL;
	} else {
		xfer = list_next_entry(xfer, transfer_list);
	}

	*_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->tx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->tx_buf);

	st->tx_xfer = xfer;
	if (xfer) {
		st->tx_length = xfer->len;
		st->tx_buf = xfer->tx_buf;
	} else {
		st->tx_buf = NULL;
	}
}

static void spi_engine_rx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->rx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->rx_buf);

	st->rx_xfer = xfer;
	if (xfer) {
		st->rx_length = xfer->len;
		st->rx_buf = xfer->rx_buf;
	} else {
		st->rx_buf = NULL;
	}
}

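/*
 * Drains as many pending command words as the CMD FIFO has room for.
 * Returns true if command words remain, in which case the caller leaves the
 * CMD almost-empty interrupt enabled so this is called again later.
 */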
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}

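/*
 * Feeds the SDO FIFO from the current tx_buf, widening writes to the word
 * size implied by bits_per_word (1, 2 or 4 bytes per FIFO word). Returns
 * true while TX data remains so the caller keeps the SDO almost-empty
 * interrupt enabled.
 */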
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}

static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}

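/*
 * Interrupt handler: acknowledges SYNC, tops up the command and SDO FIFOs,
 * drains the SDI FIFO, and completes the in-flight message once the final
 * SYNC instruction carrying AXI_SPI_ENGINE_CUR_MSG_SYNC_ID has retired.
 */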
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}

static int spi_engine_offload_prepare(struct spi_message *msg)
{
	struct spi_controller *host = msg->spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_program *p = msg->opt_state;
	struct spi_engine_offload *priv = msg->offload->priv;
	struct spi_transfer *xfer;
	void __iomem *cmd_addr;
	void __iomem *sdo_addr;
	size_t tx_word_count = 0;
	unsigned int i;

	if (p->length > spi_engine->offload_ctrl_mem_size)
		return -EINVAL;

	/* count total number of tx words in message */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* no support for reading to rx_buf */
		if (xfer->rx_buf)
			return -EINVAL;

		if (!xfer->tx_buf)
			continue;

		if (xfer->bits_per_word <= 8)
			tx_word_count += xfer->len;
		else if (xfer->bits_per_word <= 16)
			tx_word_count += xfer->len / 2;
		else
			tx_word_count += xfer->len / 4;
	}

	if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
		return -EINVAL;

	if (tx_word_count > spi_engine->offload_sdo_mem_size)
		return -EINVAL;

	/*
	 * This protects against calling spi_optimize_message() with an offload
	 * that has already been prepared with a different message.
	 */
	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
		return -EBUSY;

	cmd_addr = spi_engine->base +
		   SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
	sdo_addr = spi_engine->base +
		   SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->tx_buf)
			continue;

		if (xfer->bits_per_word <= 8) {
			const u8 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len; i++)
				writel_relaxed(buf[i], sdo_addr);
		} else if (xfer->bits_per_word <= 16) {
			const u16 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len / 2; i++)
				writel_relaxed(buf[i], sdo_addr);
		} else {
			const u32 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len / 4; i++)
				writel_relaxed(buf[i], sdo_addr);
		}
	}

	for (i = 0; i < p->length; i++)
		writel_relaxed(p->instructions[i], cmd_addr);

	return 0;
}

static void spi_engine_offload_unprepare(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;

	writel_relaxed(1, spi_engine->base +
			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
	writel_relaxed(0, spi_engine->base +
			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));

	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
}

static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;
	int ret;

	ret = spi_engine_precompile_message(msg);
	if (ret)
		return ret;

	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	spi_engine_compile_message(msg, false, p);

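	/*
	 * Terminate the program with a SYNC instruction. Non-offload messages
	 * use AXI_SPI_ENGINE_CUR_MSG_SYNC_ID, the only ID that completes
	 * host->cur_msg in spi_engine_irq(); offload programs use ID 0.
	 */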
	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
		msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	if (msg->offload) {
		ret = spi_engine_offload_prepare(msg);
		if (ret) {
			msg->opt_state = NULL;
			kfree(p);
			return ret;
		}
	}

	return 0;
}

static int spi_engine_unoptimize_message(struct spi_message *msg)
{
	if (msg->offload)
		spi_engine_offload_unprepare(msg->offload);

	kfree(msg->opt_state);

	return 0;
}

static struct spi_offload
*spi_engine_get_offload(struct spi_device *spi,
			const struct spi_offload_config *config)
{
	struct spi_controller *host = spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_offload *priv;

	if (!spi_engine->offload)
		return ERR_PTR(-ENODEV);

	if (config->capability_flags & ~spi_engine->offload_caps)
		return ERR_PTR(-EINVAL);

	priv = spi_engine->offload->priv;

	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
		return ERR_PTR(-EBUSY);

	return spi_engine->offload;
}

static void spi_engine_put_offload(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;

	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
}

static int spi_engine_setup(struct spi_device *device)
{
	struct spi_controller *host = device->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);

	if (device->mode & SPI_CS_HIGH)
		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
	else
		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/*
	 * In addition to setting the flags, we have to do a CS assert command
	 * to make the new setting actually take effect.
	 */
	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	return 0;
}

static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	if (msg->offload) {
		dev_err(&host->dev, "Single transfer offload not supported\n");
		msg->status = -EOPNOTSUPP;
		goto out;
	}

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	if (trace_spi_transfer_start_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_start(msg, xfer);
	}

	spin_lock_irqsave(&spi_engine->lock, flags);

	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	if (trace_spi_transfer_stop_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_stop(msg, xfer);
	}

out:
	spi_finalize_current_message(host);

	return msg->status;
}

static int spi_engine_trigger_enable(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;
	unsigned int reg;

	reg = readl_relaxed(spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
	writel_relaxed(reg, spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	return 0;
}

static void spi_engine_trigger_disable(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;
	unsigned int reg;

	reg = readl_relaxed(spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
	writel_relaxed(reg, spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
}

static struct dma_chan
*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	char name[16];

	snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);

	return dma_request_chan(offload->provider_dev, name);
}

static struct dma_chan
*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	char name[16];

	snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);

	return dma_request_chan(offload->provider_dev, name);
}

static const struct spi_offload_ops spi_engine_offload_ops = {
	.trigger_enable = spi_engine_trigger_enable,
	.trigger_disable = spi_engine_trigger_disable,
	.tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
	.rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
};

static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	/*
	 * REVISIT: for now, all SPI Engines only have one offload. In the
	 * future, this should be read from a memory mapped register to
	 * determine the number of offloads enabled at HDL compile time. For
	 * now, we can tell if an offload is present if there is a trigger
	 * source wired up to it.
	 */
	if (device_property_present(&pdev->dev, "trigger-sources")) {
		struct spi_engine_offload *priv;

		spi_engine->offload =
			devm_spi_offload_alloc(&pdev->dev,
					       sizeof(struct spi_engine_offload));
		if (IS_ERR(spi_engine->offload))
			return PTR_ERR(spi_engine->offload);

		priv = spi_engine->offload->priv;
		priv->spi_engine = spi_engine;
		priv->offload_num = 0;

		spi_engine->offload->ops = &spi_engine_offload_ops;
		spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;

		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
		}

		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
		} else {
			/*
			 * HDL compile option to enable TX DMA stream also disables
			 * the SDO memory, so can't do both at the same time.
			 */
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
		}
	}

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
		unsigned int sizes = readl(spi_engine->base +
				SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);

		spi_engine->offload_ctrl_mem_size = 1 <<
			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
		spi_engine->offload_sdo_mem_size = 1 <<
			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
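		/*
		 * The register stores address widths, so the sizes are powers
		 * of two: e.g. a stored CMD width of 4 yields a 16-entry
		 * offload command memory, matching the pre-1.1 default.
		 */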
	} else {
		spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
		spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
	}

	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
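	/*
	 * SCLK is at most half the spi_clk reference: e.g. a 160 MHz
	 * reference yields an 80 MHz maximum SCLK (CLK_DIV register 0).
	 */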
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->get_offload = spi_engine_get_offload;
	host->put_offload = spi_engine_put_offload;
	host->num_chipselect = 8;

	/* Some features depend on the IP core version. */
	if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
			host->mode_bits |= SPI_CS_HIGH;
			host->setup = spi_engine_setup;
		}
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
			host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	}

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}

static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");