// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <trace/events/spi.h>

/* SPI register offsets */
#define SPI_CR 0x0000
#define SPI_MR 0x0004
#define SPI_RDR 0x0008
#define SPI_TDR 0x000c
#define SPI_SR 0x0010
#define SPI_IER 0x0014
#define SPI_IDR 0x0018
#define SPI_IMR 0x001c
#define SPI_CSR0 0x0030
#define SPI_CSR1 0x0034
#define SPI_CSR2 0x0038
#define SPI_CSR3 0x003c
#define SPI_FMR 0x0040
#define SPI_FLR 0x0044
#define SPI_VERSION 0x00fc
#define SPI_RPR 0x0100
#define SPI_RCR 0x0104
#define SPI_TPR 0x0108
#define SPI_TCR 0x010c
#define SPI_RNPR 0x0110
#define SPI_RNCR 0x0114
#define SPI_TNPR 0x0118
#define SPI_TNCR 0x011c
#define SPI_PTCR 0x0120
#define SPI_PTSR 0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET 0
#define SPI_SPIEN_SIZE 1
#define SPI_SPIDIS_OFFSET 1
#define SPI_SPIDIS_SIZE 1
#define SPI_SWRST_OFFSET 7
#define SPI_SWRST_SIZE 1
#define SPI_LASTXFER_OFFSET 24
#define SPI_LASTXFER_SIZE 1
#define SPI_TXFCLR_OFFSET 16
#define SPI_TXFCLR_SIZE 1
#define SPI_RXFCLR_OFFSET 17
#define SPI_RXFCLR_SIZE 1
#define SPI_FIFOEN_OFFSET 30
#define SPI_FIFOEN_SIZE 1
#define SPI_FIFODIS_OFFSET 31
#define SPI_FIFODIS_SIZE 1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET 0
#define SPI_MSTR_SIZE 1
#define SPI_PS_OFFSET 1
#define SPI_PS_SIZE 1
#define SPI_PCSDEC_OFFSET 2
#define SPI_PCSDEC_SIZE 1
#define SPI_FDIV_OFFSET 3
#define SPI_FDIV_SIZE 1
#define SPI_MODFDIS_OFFSET 4
#define SPI_MODFDIS_SIZE 1
#define SPI_WDRBT_OFFSET 5
#define SPI_WDRBT_SIZE 1
#define SPI_LLB_OFFSET 7
#define SPI_LLB_SIZE 1
#define SPI_PCS_OFFSET 16
#define SPI_PCS_SIZE 4
#define SPI_DLYBCS_OFFSET 24
#define SPI_DLYBCS_SIZE 8

/* Bitfields in RDR */
#define SPI_RD_OFFSET 0
#define SPI_RD_SIZE 16

/* Bitfields in TDR */
#define SPI_TD_OFFSET 0
#define SPI_TD_SIZE 16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET 0
#define SPI_RDRF_SIZE 1
#define SPI_TDRE_OFFSET 1
#define SPI_TDRE_SIZE 1
#define SPI_MODF_OFFSET 2
#define SPI_MODF_SIZE 1
#define SPI_OVRES_OFFSET 3
#define SPI_OVRES_SIZE 1
#define SPI_ENDRX_OFFSET 4
#define SPI_ENDRX_SIZE 1
#define SPI_ENDTX_OFFSET 5
#define SPI_ENDTX_SIZE 1
#define SPI_RXBUFF_OFFSET 6
#define SPI_RXBUFF_SIZE 1
#define SPI_TXBUFE_OFFSET 7
#define SPI_TXBUFE_SIZE 1
#define SPI_NSSR_OFFSET 8
#define SPI_NSSR_SIZE 1
#define SPI_TXEMPTY_OFFSET 9
#define SPI_TXEMPTY_SIZE 1
#define SPI_SPIENS_OFFSET 16
#define SPI_SPIENS_SIZE 1
#define SPI_TXFEF_OFFSET 24
#define SPI_TXFEF_SIZE 1
#define SPI_TXFFF_OFFSET 25
#define SPI_TXFFF_SIZE 1
#define SPI_TXFTHF_OFFSET 26
#define SPI_TXFTHF_SIZE 1
#define SPI_RXFEF_OFFSET 27
#define SPI_RXFEF_SIZE 1
#define SPI_RXFFF_OFFSET 28
#define SPI_RXFFF_SIZE 1
#define SPI_RXFTHF_OFFSET 29
#define SPI_RXFTHF_SIZE 1
#define SPI_TXFPTEF_OFFSET 30
#define SPI_TXFPTEF_SIZE 1
#define SPI_RXFPTEF_OFFSET 31
#define SPI_RXFPTEF_SIZE 1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET 0
#define SPI_CPOL_SIZE 1
#define SPI_NCPHA_OFFSET 1
#define SPI_NCPHA_SIZE 1
#define SPI_CSAAT_OFFSET 3
#define SPI_CSAAT_SIZE 1
#define SPI_BITS_OFFSET 4
#define SPI_BITS_SIZE 4
#define SPI_SCBR_OFFSET 8
#define SPI_SCBR_SIZE 8
#define SPI_DLYBS_OFFSET 16
#define SPI_DLYBS_SIZE 8
#define SPI_DLYBCT_OFFSET 24
#define SPI_DLYBCT_SIZE 8

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET 0
#define SPI_RXCTR_SIZE 16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET 0
#define SPI_TXCTR_SIZE 16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET 0
#define SPI_RXNCR_SIZE 16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET 0
#define SPI_TXNCR_SIZE 16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET 0
#define SPI_RXTEN_SIZE 1
#define SPI_RXTDIS_OFFSET 1
#define SPI_RXTDIS_SIZE 1
#define SPI_TXTEN_OFFSET 8
#define SPI_TXTEN_SIZE 1
#define SPI_TXTDIS_OFFSET 9
#define SPI_TXTDIS_SIZE 1

/* Bitfields in FMR */
#define SPI_TXRDYM_OFFSET 0
#define SPI_TXRDYM_SIZE 2
#define SPI_RXRDYM_OFFSET 4
#define SPI_RXRDYM_SIZE 2
#define SPI_TXFTHRES_OFFSET 16
#define SPI_TXFTHRES_SIZE 6
#define SPI_RXFTHRES_OFFSET 24
#define SPI_RXFTHRES_SIZE 6

/* Bitfields in FLR */
#define SPI_TXFL_OFFSET 0
#define SPI_TXFL_SIZE 6
#define SPI_RXFL_OFFSET 16
#define SPI_RXFL_SIZE 6

/* Constants for BITS */
#define SPI_BITS_8_BPT 0
#define SPI_BITS_9_BPT 1
#define SPI_BITS_10_BPT 2
#define SPI_BITS_11_BPT 3
#define SPI_BITS_12_BPT 4
#define SPI_BITS_13_BPT 5
#define SPI_BITS_14_BPT 6
#define SPI_BITS_15_BPT 7
#define SPI_BITS_16_BPT 8
#define SPI_ONE_DATA 0
#define SPI_TWO_DATA 1
#define SPI_FOUR_DATA 2

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	| SPI_BF(name, value))
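/*
 * Illustrative example (not part of the driver logic): programming an
 * SCBR divider of 42 into CSR0 without touching the other fields would
 * look like
 *
 *	csr = spi_readl(as, CSR0);
 *	csr = SPI_BFINS(SCBR, 42, csr);	(clear bits 15:8, insert 42)
 *	spi_writel(as, CSR0, csr);
 *
 * since SPI_BF(SCBR, 42) expands to ((42 & 0xff) << 8) with the
 * OFFSET/SIZE values defined above.
 */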

/* Register access macros */
#define spi_readl(port, reg) \
	readl_relaxed((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
	writel_relaxed((value), (port)->regs + SPI_##reg)
#define spi_writew(port, reg, value) \
	writew_relaxed((value), (port)->regs + SPI_##reg)

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES 16

#define AUTOSUSPEND_TIMEOUT 2000

struct atmel_spi_caps {
	bool is_spi2;
	bool has_wdrbt;
	bool has_dma_support;
	bool has_pdc_support;
};

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs. The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t lock;
	unsigned long flags;

	phys_addr_t phybase;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct clk *gclk;
	struct platform_device *pdev;
	unsigned long spi_clk;

	struct spi_transfer *current_transfer;
	int current_remaining_bytes;
	int done_status;
	dma_addr_t dma_addr_rx_bbuf;
	dma_addr_t dma_addr_tx_bbuf;
	void *addr_rx_bbuf;
	void *addr_tx_bbuf;

	struct completion xfer_completion;

	struct atmel_spi_caps caps;

	bool use_dma;
	bool use_pdc;

	bool keep_cs;

	u32 fifo_size;
	bool last_polarity;
	u8 native_cs_free;
	u8 native_cs_for_gpio;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	u32 csr;
};

#define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
#define INVALID_DMA_ADDRESS 0xffffffff

/*
 * This frequency can be anything supported by the controller, but to avoid
 * unnecessary delay, the highest possible frequency is chosen.
 *
 * This frequency is the highest possible which is not interfering with other
 * chip select registers (see Note for Serial Clock Bit Rate configuration in
 * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283)
 */
#define DUMMY_MSG_FREQUENCY 0x02
/*
 * 8 bits is the minimum data the controller is capable of sending.
 *
 * This message can be anything as it should not be treated by any SPI device.
 */
#define DUMMY_MSG 0xAA

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SCBR allows faster clocking
 */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}

/*
 * Send a dummy message.
 *
 * This is sometimes needed when using a CS GPIO to force clock transition when
 * switching between devices with different polarities.
 */
static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
{
	u32 status;
	u32 csr;

	/*
	 * Set a clock frequency to allow sending message on SPI bus.
	 * The frequency here can be anything, but is needed for
	 * the controller to send the data.
	 */
	csr = spi_readl(as, CSR0 + 4 * chip_select);
	csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
	spi_writel(as, CSR0 + 4 * chip_select, csr);

	/*
	 * Read all data coming from SPI bus, needed to be able to send
	 * the message.
	 */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	spi_writel(as, TDR, DUMMY_MSG);

	readl_poll_timeout_atomic(as->regs + SPI_SR, status,
				  (status & SPI_BIT(TXEMPTY)), 1, 1000);
}


/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK. ("NPCSx rises if no data is to be
 * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Even on controllers newer than the at91rm9200, using GPIOs can make
 * sense, as it lets us support active-high chipselects despite the
 * controller's belief that only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO. ("Mode Fault does not allow more than one
 * Master on Chip Select 0.") No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 *
 * When changing the clock polarity, the SPI controller waits for the next
 * transmission to enforce the default clock state. This may be an issue when
 * using a GPIO as Chip Select: the clock level is applied only when the first
 * packet is sent, once the CS has already been asserted. The workaround is to
 * avoid this by sending a first (dummy) message before toggling the CS state.
 */
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	bool new_polarity;
	int chip_select;
	u32 mr;

	if (spi_get_csgpiod(spi, 0))
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi_get_chipselect(spi, 0);

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
		/* For older SPI versions, there is an issue whereby a PDC
		 * transfer on CS1,2,3 needs SPI_CSR0.BITS to be configured
		 * the same as SPI_CSR1,2,3.BITS.
		 */
		spi_writel(as, CSR0, asd->csr);

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(0x01 << chip_select), mr);
		spi_writel(as, MR, mr);

		/*
		 * Ensure the clock polarity is valid before we actually
		 * assert the CS, to avoid spurious clock edges being
		 * processed by the spi devices.
		 */
		if (spi_get_csgpiod(spi, 0)) {
			new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
			if (new_polarity != as->last_polarity) {
				/*
				 * Need to disable the GPIO before sending the
				 * dummy message because it is already set by
				 * the spi core.
				 */
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
				atmel_spi_send_dummy(as, spi, chip_select);
				as->last_polarity = new_polarity;
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
			}
		}
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->controller->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
					   csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	int chip_select;
	u32 mr;

	if (spi_get_csgpiod(spi, 0))
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi_get_chipselect(spi, 0);

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);

	if (!spi_get_csgpiod(spi, 0))
		spi_writel(as, CR, SPI_BIT(LASTXFER));
}

static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}

static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}

static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
{
	return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
}

static inline bool atmel_spi_use_dma(struct atmel_spi *as,
				     struct spi_transfer *xfer)
{
	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}

static bool atmel_spi_can_dma(struct spi_controller *host,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_controller_get_devdata(host);

	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
		return atmel_spi_use_dma(as, xfer) &&
		       !atmel_spi_is_vmalloc_xfer(xfer);
	else
		return atmel_spi_use_dma(as, xfer);

}

static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
{
	struct spi_controller *host = platform_get_drvdata(as->pdev);
	struct dma_slave_config slave_config;
	int err = 0;

	if (bits_per_word > 8) {
		slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	slave_config.dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
	slave_config.src_addr = (dma_addr_t)as->phybase + SPI_RDR;
	slave_config.src_maxburst = 1;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	/*
	 * This driver uses fixed peripheral select mode (PS bit set to '0' in
	 * the Mode Register).
	 * So according to the datasheet, when FIFOs are available (and
	 * enabled), the Transmit FIFO operates in Multiple Data Mode.
	 * In this mode, up to 2 data, not 4, can be written into the Transmit
	 * Data Register in a single access.
	 * However, the first data has to be written into the lowest 16 bits
	 * and the second data into the highest 16 bits of the Transmit
	 * Data Register. For 8-bit data (the most frequent case), that would
	 * require reworking tx_buf so that each datum actually occupies 16
	 * bits. So we'd rather write only one datum at a time. Hence the
	 * transmit path works the same whether FIFOs are available (and
	 * enabled) or not.
	 */
	if (dmaengine_slave_config(host->dma_tx, &slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
	}

	/*
	 * This driver configures the spi controller for host mode (MSTR bit
	 * set to '1' in the Mode Register).
	 * So according to the datasheet, when FIFOs are available (and
	 * enabled), the Receive FIFO operates in Single Data Mode.
	 * So the receive path works the same whether FIFOs are available (and
	 * enabled) or not.
	 */
	if (dmaengine_slave_config(host->dma_rx, &slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
	}

	return err;
}

static int atmel_spi_configure_dma(struct spi_controller *host,
				   struct atmel_spi *as)
{
	struct device *dev = &as->pdev->dev;
	int err;

	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		err = PTR_ERR(host->dma_tx);
		dev_dbg(dev, "No TX DMA channel, DMA is disabled\n");
		goto error_clear;
	}

	host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(host->dma_rx)) {
		err = PTR_ERR(host->dma_rx);
		/*
		 * No reason to check EPROBE_DEFER here since we have already
		 * requested tx channel.
		 */
		dev_dbg(dev, "No RX DMA channel, DMA is disabled\n");
		goto error;
	}

	err = atmel_spi_dma_slave_config(as, 8);
	if (err)
		goto error;

	dev_info(&as->pdev->dev,
		 "Using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(host->dma_tx),
		 dma_chan_name(host->dma_rx));

	return 0;
error:
	if (!IS_ERR(host->dma_rx))
		dma_release_channel(host->dma_rx);
	if (!IS_ERR(host->dma_tx))
		dma_release_channel(host->dma_tx);
error_clear:
	host->dma_tx = host->dma_rx = NULL;
	return err;
}

static void atmel_spi_stop_dma(struct spi_controller *host)
{
	if (host->dma_rx)
		dmaengine_terminate_all(host->dma_rx);
	if (host->dma_tx)
		dmaengine_terminate_all(host->dma_tx);
}

static void atmel_spi_release_dma(struct spi_controller *host)
{
	if (host->dma_rx) {
		dma_release_channel(host->dma_rx);
		host->dma_rx = NULL;
	}
	if (host->dma_tx) {
		dma_release_channel(host->dma_tx);
		host->dma_tx = NULL;
	}
}

/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_controller *host = data;
	struct atmel_spi *as = spi_controller_get_devdata(host);

	if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
		       as->current_transfer->len);
	}
	complete(&as->xfer_completion);
}

/*
 * Next transfer using PIO without FIFO.
 */
static void atmel_spi_next_xfer_single(struct spi_controller *host,
				       struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_controller_get_devdata(host);
	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;

	dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_pio\n");

	/* Make sure data is not remaining in RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	if (xfer->bits_per_word > 8)
		spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
	else
		spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));

	dev_dbg(host->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}

/*
 * Next transfer using PIO with FIFO.
 */
static void atmel_spi_next_xfer_fifo(struct spi_controller *host,
				     struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_controller_get_devdata(host);
	u32 current_remaining_data, num_data;
	u32 offset = xfer->len - as->current_remaining_bytes;
	const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
	const u8 *bytes = (const u8 *)((u8 *)xfer->tx_buf + offset);
	u16 td0, td1;
	u32 fifomr;

	dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_fifo\n");

	/* Compute the number of data to transfer in the current iteration */
	current_remaining_data = ((xfer->bits_per_word > 8) ?
				  ((u32)as->current_remaining_bytes >> 1) :
				  (u32)as->current_remaining_bytes);
	num_data = min(current_remaining_data, as->fifo_size);

	/* Flush RX and TX FIFOs */
	spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
	while (spi_readl(as, FLR))
		cpu_relax();

	/* Set RX FIFO Threshold to the number of data to transfer */
	fifomr = spi_readl(as, FMR);
	spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));

	/* Clear FIFO flags in the Status Register, especially RXFTHF */
	(void)spi_readl(as, SR);

	/* Fill TX FIFO */
	while (num_data >= 2) {
		if (xfer->bits_per_word > 8) {
			td0 = *words++;
			td1 = *words++;
		} else {
			td0 = *bytes++;
			td1 = *bytes++;
		}

		spi_writel(as, TDR, (td1 << 16) | td0);
		num_data -= 2;
	}

	if (num_data) {
		if (xfer->bits_per_word > 8)
			td0 = *words++;
		else
			td0 = *bytes++;

		spi_writew(as, TDR, td0);
		num_data--;
	}

	dev_dbg(host->dev.parent,
		"  start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/*
	 * Enable RX FIFO Threshold Flag interrupt to be notified about
	 * transfer completion.
	 */
	spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
}

/*
 * Next transfer using PIO.
 */
static void atmel_spi_next_xfer_pio(struct spi_controller *host,
				    struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_controller_get_devdata(host);

	if (as->fifo_size)
		atmel_spi_next_xfer_fifo(host, xfer);
	else
		atmel_spi_next_xfer_single(host, xfer);
}

/*
 * Submit next transfer for DMA.
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_controller *host,
					  struct spi_transfer *xfer,
					  u32 *plen)
{
	struct atmel_spi *as = spi_controller_get_devdata(host);
	struct dma_chan *rxchan = host->dma_rx;
	struct dma_chan *txchan = host->dma_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;

	dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;


	*plen = xfer->len;

	if (atmel_spi_dma_slave_config(as, xfer->bits_per_word))
		goto err_exit;

	/* Send both scatterlists */
	if (atmel_spi_is_vmalloc_xfer(xfer) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		rxdesc = dmaengine_prep_slave_single(rxchan,
						     as->dma_addr_rx_bbuf,
						     xfer->len,
						     DMA_DEV_TO_MEM,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	} else {
		rxdesc = dmaengine_prep_slave_sg(rxchan,
						 xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	}
	if (!rxdesc)
		goto err_dma;

	if (atmel_spi_is_vmalloc_xfer(xfer) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
		txdesc = dmaengine_prep_slave_single(txchan,
						     as->dma_addr_tx_bbuf,
						     xfer->len, DMA_MEM_TO_DEV,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	} else {
		txdesc = dmaengine_prep_slave_sg(txchan,
						 xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	}
	if (!txdesc)
		goto err_dma;

	dev_dbg(host->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
		xfer->rx_buf, (unsigned long long)xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = host;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	return 0;

err_dma:
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(host);
err_exit:
	return -ENOMEM;
}

static void atmel_spi_next_xfer_data(struct spi_controller *host,
				     struct spi_transfer *xfer,
				     dma_addr_t *tx_dma,
				     dma_addr_t *rx_dma,
				     u32 *plen)
{
	*rx_dma = xfer->rx_dma + xfer->len - *plen;
	*tx_dma = xfer->tx_dma + xfer->len - *plen;
	if (*plen > host->max_dma_len)
		*plen = host->max_dma_len;
}

static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	u32 scbr, csr;
	unsigned long bus_hz;
	int chip_select;

	if (spi_get_csgpiod(spi, 0))
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi_get_chipselect(spi, 0);

	/* v1 chips start out at half the peripheral bus speed. */
	bus_hz = as->spi_clk;
	if (!atmel_spi_is_v2(as))
		bus_hz /= 2;

	/*
	 * Calculate the lowest divider that satisfies the
	 * constraint, assuming div32/fdiv/mbz == 0.
	 */
	scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);

	/*
	 * If the resulting divider doesn't fit into the
	 * register bitfield, we can't satisfy the constraint.
	 */
	if (scbr >= (1 << SPI_SCBR_SIZE)) {
		dev_err(&spi->dev,
			"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz/255);
		return -EINVAL;
	}
	if (scbr == 0) {
		dev_err(&spi->dev,
			"setup: %d Hz too high, scbr %u; max %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz);
		return -EINVAL;
	}
	csr = spi_readl(as, CSR0 + 4 * chip_select);
	csr = SPI_BFINS(SCBR, scbr, csr);
	spi_writel(as, CSR0 + 4 * chip_select, csr);
	xfer->effective_speed_hz = bus_hz / scbr;

	return 0;
}
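
/*
 * Worked example (illustrative only): with bus_hz = 84 MHz and a
 * requested xfer->speed_hz of 10 MHz, scbr = DIV_ROUND_UP(84, 10) = 9,
 * so the transfer runs at 84 MHz / 9, about 9.33 MHz: the fastest rate
 * that does not exceed the requested one. Requests below bus_hz / 255
 * cannot be satisfied and are rejected above.
 */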

/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_pdc_next_xfer(struct spi_controller *host,
				    struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_controller_get_devdata(host);
	u32 len;
	dma_addr_t tx_dma, rx_dma;

	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

	len = as->current_remaining_bytes;
	atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
	as->current_remaining_bytes -= len;

	spi_writel(as, RPR, rx_dma);
	spi_writel(as, TPR, tx_dma);

	if (xfer->bits_per_word > 8)
		len >>= 1;
	spi_writel(as, RCR, len);
	spi_writel(as, TCR, len);

	dev_dbg(&host->dev,
		"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf,
		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
		(unsigned long long)xfer->rx_dma);

	if (as->current_remaining_bytes) {
		len = as->current_remaining_bytes;
		atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
		as->current_remaining_bytes -= len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (xfer->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&host->dev,
			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
	}

	/* REVISIT: We're waiting for RXBUFF before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for TXBUFE in one transfer and
	 * then start waiting for RXBUFF in the next, it's difficult
	 * to tell the difference between the RXBUFF interrupt we're
	 * actually waiting for and the RXBUFF interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device *dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
					      nonconst_tx, xfer->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
					      xfer->rx_buf, xfer->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						 xfer->tx_dma, xfer->len,
						 DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_controller *host,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(host->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(host->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}

static void
atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u8 *rxp;
	u16 *rxp16;
	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;

	if (xfer->bits_per_word > 8) {
		rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
		*rxp16 = spi_readl(as, RDR);
	} else {
		rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
		*rxp = spi_readl(as, RDR);
	}
	if (xfer->bits_per_word > 8) {
		if (as->current_remaining_bytes > 2)
			as->current_remaining_bytes -= 2;
		else
			as->current_remaining_bytes = 0;
	} else {
		as->current_remaining_bytes--;
	}
}

static void
atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u32 fifolr = spi_readl(as, FLR);
	u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
	u32 offset = xfer->len - as->current_remaining_bytes;
	u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
	u8 *bytes = (u8 *)((u8 *)xfer->rx_buf + offset);
	u16 rd; /* RD field is the lowest 16 bits of RDR */

	/* Update the number of remaining bytes to transfer */
	num_bytes = ((xfer->bits_per_word > 8) ?
		     (num_data << 1) :
		     num_data);

	if (as->current_remaining_bytes > num_bytes)
		as->current_remaining_bytes -= num_bytes;
	else
		as->current_remaining_bytes = 0;

	/* Handle an odd number of bytes when data is wider than 8 bits */
	if (xfer->bits_per_word > 8)
		as->current_remaining_bytes &= ~0x1;

	/* Read data */
	while (num_data) {
		rd = spi_readl(as, RDR);
		if (xfer->bits_per_word > 8)
			*words++ = rd;
		else
			*bytes++ = rd;
		num_data--;
	}
}

/* Called from IRQ
 *
 * Must update "current_remaining_bytes" to keep track of data
 * to transfer.
 */
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	if (as->fifo_size)
		atmel_spi_pump_fifo_data(as, xfer);
	else
		atmel_spi_pump_single_data(as, xfer);
}

/* Interrupt
 *
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct atmel_spi *as = spi_controller_get_devdata(host);
	u32 status, pending, imr;
	struct spi_transfer *xfer;
	int ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(host->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		complete(&as->xfer_completion);

	} else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			if (!as->current_remaining_bytes)
				spi_writel(as, IDR, pending);

			complete(&as->xfer_completion);
		}

		atmel_spi_unlock(as);
	} else {
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}

static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct atmel_spi *as = spi_controller_get_devdata(host);
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		as->done_status = -EIO;

		complete(&as->xfer_completion);

	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		complete(&as->xfer_completion);
	}

	return ret;
}

static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
{
	struct spi_delay *delay = &spi->word_delay;
	u32 value = delay->value;

	switch (delay->unit) {
	case SPI_DELAY_UNIT_NSECS:
		value /= 1000;
		break;
	case SPI_DELAY_UNIT_USECS:
		break;
	default:
		return -EINVAL;
	}

	return (as->spi_clk / 1000000 * value) >> 5;
}
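
/*
 * Worked example (illustrative only): with as->spi_clk = 84 MHz and a
 * requested word delay of 1 us, the function returns (84 * 1) >> 5 = 2.
 * DLYBCT is expressed in units of 32 peripheral clock periods, so the
 * value 2 programs a delay of 2 * 32 / 84 MHz, about 0.76 us: the
 * largest delay not exceeding the requested one.
 */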

static void initialize_native_cs_for_gpio(struct atmel_spi *as)
{
	int i;
	struct spi_controller *host = platform_get_drvdata(as->pdev);

	if (!as->native_cs_free)
		return; /* already initialized */

	if (!host->cs_gpiods)
		return; /* No CS GPIO */

	/*
	 * On the first version of the controller (AT91RM9200), CS0
	 * can't be used in association with a GPIO.
	 */
	if (atmel_spi_is_v2(as))
		i = 0;
	else
		i = 1;

	for (; i < 4; i++)
		if (host->cs_gpiods[i])
			as->native_cs_free |= BIT(i);

	if (as->native_cs_free)
		as->native_cs_for_gpio = ffs(as->native_cs_free);
}

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi *as;
	struct atmel_spi_device *asd;
	u32 csr;
	unsigned int bits = spi->bits_per_word;
	int chip_select;
	int word_delay_csr;

	as = spi_controller_get_devdata(spi->controller);

	/* see notes above re chipselect */
	if (!spi_get_csgpiod(spi, 0) && (spi->mode & SPI_CS_HIGH)) {
		dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
		return -EINVAL;
	}

	/* setup() is called during spi_register_controller() (aka
	 * spi_register_master()) but after all members of the cs_gpiod
	 * array have been filled, so we can look for which native
	 * CS will be free for use with a GPIO.
	 */
	initialize_native_cs_for_gpio(as);

	if (spi_get_csgpiod(spi, 0) && as->native_cs_free) {
		dev_err(&spi->dev,
			"No native CS available to support this GPIO CS\n");
		return -EBUSY;
	}

	if (spi_get_csgpiod(spi, 0))
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi_get_chipselect(spi, 0);

	csr = SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);

	if (!spi_get_csgpiod(spi, 0))
		csr |= SPI_BIT(CSAAT);
	csr |= SPI_BF(DLYBS, 0);

	word_delay_csr = atmel_word_delay_csr(spi, as);
	if (word_delay_csr < 0)
		return word_delay_csr;

	/* DLYBCT adds delays between words. This is useful for slow devices
	 * that need a bit of time to set up the next transfer.
	 */
	csr |= SPI_BF(DLYBCT, word_delay_csr);

	asd = spi->controller_state;
	if (!asd) {
		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
		if (!asd)
			return -ENOMEM;

		spi->controller_state = asd;
	}

	asd->csr = csr;

	dev_dbg(&spi->dev,
		"setup: bpw %u mode 0x%x -> csr%d %08x\n",
		bits, spi->mode, spi_get_chipselect(spi, 0), csr);

	if (!atmel_spi_is_v2(as))
		spi_writel(as, CSR0 + 4 * chip_select, csr);

	return 0;
}
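
/*
 * Illustrative example of the CSR value built above: an 8-bit, SPI
 * mode 3 (CPOL = 1, CPHA = 1) device on a native chip select ends up
 * with csr = SPI_BIT(CPOL) | SPI_BIT(CSAAT) | SPI_BF(DLYBCT,
 * word_delay_csr). BITS is 0 (8 bits per transfer), NCPHA stays clear
 * because CPHA is set, and DLYBS is 0.
 */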

static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct atmel_spi *as = spi_controller_get_devdata(spi->controller);
	/* The core doesn't really pass us enable/disable, but CS HIGH vs
	 * CS LOW. Since we already have routines for activate/deactivate,
	 * translate high/low to active/inactive.
	 */
	enable = (!!(spi->mode & SPI_CS_HIGH) == enable);

	if (enable) {
		cs_activate(as, spi);
	} else {
		cs_deactivate(as, spi);
	}

}

static int atmel_spi_one_transfer(struct spi_controller *host,
				  struct spi_device *spi,
				  struct spi_transfer *xfer)
{
	struct atmel_spi *as;
	u8 bits;
	u32 len;
	struct atmel_spi_device *asd;
	int timeout;
	int ret;
	unsigned int dma_timeout;
	long ret_timeout;

	as = spi_controller_get_devdata(host);

	asd = spi->controller_state;
	bits = (asd->csr >> 4) & 0xf;
	if (bits != xfer->bits_per_word - 8) {
		dev_dbg(&spi->dev,
			"you can't yet change bits_per_word in transfers\n");
		return -ENOPROTOOPT;
	}

	/*
	 * DMA map early, for performance (empties dcache ASAP) and
	 * better fault reporting.
	 */
	if (as->use_pdc) {
		if (atmel_spi_dma_map_xfer(as, xfer) < 0)
			return -ENOMEM;
	}

	atmel_spi_set_xfer_speed(as, spi, xfer);

	as->done_status = 0;
	as->current_transfer = xfer;
	as->current_remaining_bytes = xfer->len;
	while (as->current_remaining_bytes) {
		reinit_completion(&as->xfer_completion);

		if (as->use_pdc) {
			atmel_spi_lock(as);
			atmel_spi_pdc_next_xfer(host, xfer);
			atmel_spi_unlock(as);
		} else if (atmel_spi_use_dma(as, xfer)) {
			len = as->current_remaining_bytes;
			ret = atmel_spi_next_xfer_dma_submit(host,
							     xfer, &len);
			if (ret) {
				dev_err(&spi->dev,
					"unable to use DMA, fallback to PIO\n");
				as->done_status = ret;
				break;
			} else {
				as->current_remaining_bytes -= len;
				if (as->current_remaining_bytes < 0)
					as->current_remaining_bytes = 0;
			}
		} else {
			atmel_spi_lock(as);
			atmel_spi_next_xfer_pio(host, xfer);
			atmel_spi_unlock(as);
		}

		dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
		ret_timeout = wait_for_completion_timeout(&as->xfer_completion, dma_timeout);
		if (!ret_timeout) {
			dev_err(&spi->dev, "spi transfer timeout\n");
			as->done_status = -EIO;
		}

		if (as->done_status)
			break;
	}

	if (as->done_status) {
		if (as->use_pdc) {
			dev_warn(host->dev.parent,
				 "overrun (%u/%u remaining)\n",
				 spi_readl(as, TCR), spi_readl(as, RCR));

			/*
			 * Clean up DMA registers and make sure the data
			 * registers are empty.
			 */
			spi_writel(as, RNCR, 0);
			spi_writel(as, TNCR, 0);
			spi_writel(as, RCR, 0);
			spi_writel(as, TCR, 0);
			for (timeout = 1000; timeout; timeout--)
				if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
					break;
			if (!timeout)
				dev_warn(host->dev.parent,
					 "timeout waiting for TXEMPTY");
			while (spi_readl(as, SR) & SPI_BIT(RDRF))
				spi_readl(as, RDR);

			/* Clear any overrun happening while cleaning up */
			spi_readl(as, SR);

		} else if (atmel_spi_use_dma(as, xfer)) {
			atmel_spi_stop_dma(host);
		}
	}

	if (as->use_pdc)
		atmel_spi_dma_unmap_xfer(host, xfer);

	if (as->use_pdc)
		atmel_spi_disable_pdc_transfer(as);

	return as->done_status;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;

	if (!asd)
		return;

	spi->controller_state = NULL;
	kfree(asd);
}

static inline unsigned int atmel_get_version(struct atmel_spi *as)
{
	return spi_readl(as, VERSION) & 0x00000fff;
}

static void atmel_get_caps(struct atmel_spi *as)
{
	unsigned int version;

	version = atmel_get_version(as);

	as->caps.is_spi2 = version > 0x121;
	as->caps.has_wdrbt = version >= 0x210;
	as->caps.has_dma_support = version >= 0x212;
	as->caps.has_pdc_support = version < 0x212;
}
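
/*
 * For example (derived from the checks above), a controller reporting
 * version 0x210 is treated as SPIv2 with WDRBT support but without DMA
 * engine support, so it falls back to the PDC; anything reporting
 * 0x212 or later uses the DMA engine instead.
 */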

static void atmel_spi_init(struct atmel_spi *as)
{
	u32 mr = 0;

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */

	/* It is recommended to enable FIFOs first thing after reset */
	if (as->fifo_size)
		spi_writel(as, CR, SPI_BIT(FIFOEN));

	/*
	 * If GCLK is selected as the source clock for bit rate generation,
	 * enable the BRSRCCLK/FDIV/DIV32 bit.
	 */
	if (as->gclk)
		mr |= SPI_BIT(FDIV);

	if (as->caps.has_wdrbt)
		mr |= SPI_BIT(WDRBT);

	spi_writel(as, MR, mr | SPI_BIT(MODFDIS) | SPI_BIT(MSTR));

	if (as->use_pdc)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));
}

static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource *regs;
	int irq;
	struct clk *clk;
	int ret;
	struct spi_controller *host;
	struct atmel_spi *as;

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	host = spi_alloc_host(&pdev->dev, sizeof(*as));
	if (!host)
		return -ENOMEM;

	/* the spi->mode bits understood by this driver: */
	host->use_gpio_descriptors = true;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
	host->dev.of_node = pdev->dev.of_node;
	host->bus_num = pdev->id;
	host->num_chipselect = 4;
	host->setup = atmel_spi_setup;
	host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX |
		       SPI_CONTROLLER_GPIO_SS);
	host->transfer_one = atmel_spi_one_transfer;
	host->set_cs = atmel_spi_set_cs;
	host->cleanup = atmel_spi_cleanup;
	host->auto_runtime_pm = true;
	host->max_dma_len = SPI_MAX_DMA_XFER;
	host->can_dma = atmel_spi_can_dma;
	platform_set_drvdata(pdev, host);

	as = spi_controller_get_devdata(host);

	spin_lock_init(&as->lock);

	as->pdev = pdev;
	as->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
	if (IS_ERR(as->regs)) {
		ret = PTR_ERR(as->regs);
		goto out_unmap_regs;
	}
	as->phybase = regs->start;
	as->irq = irq;
	as->clk = clk;
	as->gclk = devm_clk_get_optional(&pdev->dev, "spi_gclk");
	if (IS_ERR(as->gclk)) {
		ret = PTR_ERR(as->gclk);
		goto out_unmap_regs;
	}

	init_completion(&as->xfer_completion);

	atmel_get_caps(as);

	as->use_dma = false;
	as->use_pdc = false;
	if (as->caps.has_dma_support) {
		ret = atmel_spi_configure_dma(host, as);
		if (ret == 0) {
			as->use_dma = true;
		} else if (ret == -EPROBE_DEFER) {
			goto out_unmap_regs;
		}
	} else if (as->caps.has_pdc_support) {
		as->use_pdc = true;
	}

	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
						      SPI_MAX_DMA_XFER,
						      &as->dma_addr_rx_bbuf,
						      GFP_KERNEL | GFP_DMA);
		if (!as->addr_rx_bbuf) {
			as->use_dma = false;
		} else {
			as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
							      SPI_MAX_DMA_XFER,
							      &as->dma_addr_tx_bbuf,
							      GFP_KERNEL | GFP_DMA);
			if (!as->addr_tx_bbuf) {
				as->use_dma = false;
				dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
						  as->addr_rx_bbuf,
						  as->dma_addr_rx_bbuf);
			}
		}
		if (!as->use_dma)
			dev_info(host->dev.parent,
				 " can not allocate dma coherent memory\n");
	}

	if (as->caps.has_dma_support && !as->use_dma)
		dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");

	if (as->use_pdc) {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
				       0, dev_name(&pdev->dev), host);
	} else {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
				       0, dev_name(&pdev->dev), host);
	}
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_free_irq;

	/*
	 * In cases where the peripheral clock is so high that the
	 * FLEX_SPI_CSRx.SCBR value would exceed its threshold (SCBR <= 255),
	 * the GCLK is used as the source clock for SPCK (SPI Serial Clock)
	 * bit rate generation.
	 */
	if (as->gclk) {
		ret = clk_prepare_enable(as->gclk);
		if (ret)
			goto out_disable_clk;
		as->spi_clk = clk_get_rate(as->gclk);
	} else {
		as->spi_clk = clk_get_rate(clk);
	}

	as->fifo_size = 0;
	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &as->fifo_size)) {
		dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
	}

	atmel_spi_init(as);

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		goto out_free_dma;

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
		 atmel_get_version(as), (unsigned long)regs->start,
		 irq);

	return 0;

out_free_dma:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (as->use_dma)
		atmel_spi_release_dma(host);

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable_unprepare(as->gclk);
out_disable_clk:
	clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
	spi_controller_put(host);
	return ret;
}

static void atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_controller_get_devdata(host);

	pm_runtime_get_sync(&pdev->dev);

	/* reset the hardware and block queue progress */
	if (as->use_dma) {
		atmel_spi_stop_dma(host);
		atmel_spi_release_dma(host);
		if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
			dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
					  as->addr_tx_bbuf,
					  as->dma_addr_tx_bbuf);
			dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
					  as->addr_rx_bbuf,
					  as->dma_addr_rx_bbuf);
		}
	}

	spin_lock_irq(&as->lock);
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	clk_disable_unprepare(as->clk);
	if (as->gclk)
		clk_disable_unprepare(as->gclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static int atmel_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_controller_get_devdata(host);

	clk_disable_unprepare(as->clk);
	if (as->gclk)
		clk_disable_unprepare(as->gclk);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int atmel_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_controller_get_devdata(host);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(as->clk);
	if (ret)
		return ret;
	if (as->gclk) {
		ret = clk_prepare_enable(as->gclk);
		if (ret)
			return ret;
	}

	return 0;
}

static int atmel_spi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	/* Stop the queue running */
	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		atmel_spi_runtime_suspend(dev);

	return 0;
}

static int atmel_spi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(as->clk);
	if (ret)
		return ret;
	if (as->gclk) {
		ret = clk_prepare_enable(as->gclk);
		if (ret)
			return ret;
	}

	atmel_spi_init(as);

	clk_disable_unprepare(as->clk);
	if (as->gclk)
		clk_disable_unprepare(as->gclk);

	if (!pm_runtime_suspended(dev)) {
		ret = atmel_spi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	/* Start the queue running */
	return spi_controller_resume(host);
}

static const struct dev_pm_ops atmel_spi_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
	RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
		       atmel_spi_runtime_resume, NULL)
};

static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);

static struct platform_driver atmel_spi_driver = {
	.driver = {
		.name = "atmel_spi",
		.pm = pm_ptr(&atmel_spi_pm_ops),
		.of_match_table = atmel_spi_dt_ids,
	},
	.probe = atmel_spi_probe,
	.remove = atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");