// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ASPEED FMC/SPI Memory Controller Driver
 *
 * Copyright (c) 2015-2022, IBM Corporation.
 * Copyright (c) 2020, ASPEED Corporation.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define DEVICE_NAME "spi-aspeed-smc"

/* Type setting Register */
#define CONFIG_REG		0x0
#define CONFIG_TYPE_SPI		0x2

/* CE Control Register */
#define CE_CTRL_REG		0x4

/* CEx Control Register */
#define CE0_CTRL_REG		0x10
#define CTRL_IO_MODE_MASK	GENMASK(30, 28)
#define CTRL_IO_SINGLE_DATA	0x0
#define CTRL_IO_DUAL_DATA	BIT(29)
#define CTRL_IO_QUAD_DATA	BIT(30)
#define CTRL_COMMAND_SHIFT	16
#define CTRL_IO_ADDRESS_4B	BIT(13)	/* AST2400 SPI only */
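/*
 * The dummy byte count is split in the control register: bits [7:6]
 * hold count[1:0] and bit 14 holds count[2], i.e. up to 7 dummy bytes.
 */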
#define CTRL_IO_DUMMY_SET(dummy) \
	(((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
#define CTRL_FREQ_SEL_SHIFT	8
#define CTRL_FREQ_SEL_MASK	GENMASK(11, CTRL_FREQ_SEL_SHIFT)
#define CTRL_CE_STOP_ACTIVE	BIT(2)
#define CTRL_IO_MODE_CMD_MASK	GENMASK(1, 0)
#define CTRL_IO_MODE_NORMAL	0x0
#define CTRL_IO_MODE_READ	0x1
#define CTRL_IO_MODE_WRITE	0x2
#define CTRL_IO_MODE_USER	0x3

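/*
 * Control register fields updated for each command: IO mode [31:28],
 * command opcode [23:16], dummy cycles (bit 14 and bits [7:6]) and
 * command mode [1:0].
 */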
#define CTRL_IO_CMD_MASK	0xf0ff40c3

/* CEx Address Decoding Range Register */
#define CE0_SEGMENT_ADDR_REG	0x30

/* CEx Read timing compensation register */
#define CE0_TIMING_COMPENSATION_REG	0x94

enum aspeed_spi_ctl_reg_value {
	ASPEED_SPI_BASE,
	ASPEED_SPI_READ,
	ASPEED_SPI_WRITE,
	ASPEED_SPI_MAX,
};

struct aspeed_spi;

struct aspeed_spi_chip {
	struct aspeed_spi *aspi;
	u32 cs;
	void __iomem *ctl;
	void __iomem *ahb_base;
	u32 ahb_window_size;
	u32 ctl_val[ASPEED_SPI_MAX];
	u32 clk_freq;
};

struct aspeed_spi_data {
	u32 ctl0;
	u32 max_cs;
	bool hastype;
	u32 mode_bits;
	u32 we0;
	u32 timing;
	u32 hclk_mask;
	u32 hdiv_max;

	u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
	u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
	u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
	int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
			 const u8 *golden_buf, u8 *test_buf);
};

#define ASPEED_SPI_MAX_NUM_CS	5

struct aspeed_spi {
	const struct aspeed_spi_data *data;

	void __iomem *regs;
	void __iomem *ahb_base;
	u32 ahb_base_phy;
	u32 ahb_window_size;
	struct device *dev;

	struct clk *clk;
	u32 clk_freq;

	struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
};

static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
{
	switch (op->data.buswidth) {
	case 1:
		return CTRL_IO_SINGLE_DATA;
	case 2:
		return CTRL_IO_DUAL_DATA;
	case 4:
		return CTRL_IO_QUAD_DATA;
	default:
		return CTRL_IO_SINGLE_DATA;
	}
}

static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
{
	u32 ctl;

	if (io_mode > 0) {
		ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
		ctl |= io_mode;
		writel(ctl, chip->ctl);
	}
}

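/*
 * Start a transfer in USER command mode: the first write deasserts the
 * chip select (CTRL_CE_STOP_ACTIVE set), the second drives it active so
 * that the command can be sent by hand through the AHB window.
 */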
static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
{
	u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];

	ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
	writel(ctl, chip->ctl);

	ctl &= ~CTRL_CE_STOP_ACTIVE;
	writel(ctl, chip->ctl);
}

static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
{
	u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
		CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;

	writel(ctl, chip->ctl);

	/* Restore defaults */
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
}

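/*
 * AHB window accessors: use 32-bit accesses when source and destination
 * are aligned to the pointer size and byte accesses for the remainder.
 */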
static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
{
	size_t offset = 0;

	if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
		ioread32_rep(src, buf, len >> 2);
		offset = len & ~0x3;
		len -= offset;
	}
	ioread8_rep(src, (u8 *)buf + offset, len);
	return 0;
}

static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
{
	size_t offset = 0;

	if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
		iowrite32_rep(dst, buf, len >> 2);
		offset = len & ~0x3;
		len -= offset;
	}
	iowrite8_rep(dst, (const u8 *)buf + offset, len);
	return 0;
}

static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
				    u64 offset, u32 opcode)
{
	__be32 temp;
	u32 cmdaddr;

	switch (addr_nbytes) {
	case 3:
		cmdaddr = offset & 0xFFFFFF;
		cmdaddr |= opcode << 24;

		temp = cpu_to_be32(cmdaddr);
		aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
		break;
	case 4:
		temp = cpu_to_be32(offset);
		aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
		aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
		break;
	default:
		WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
		return -EOPNOTSUPP;
	}
	return 0;
}

static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
			       const struct spi_mem_op *op)
{
	aspeed_spi_start_user(chip);
	aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
	aspeed_spi_read_from_ahb(op->data.buf.in,
				 chip->ahb_base, op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
				const struct spi_mem_op *op)
{
	aspeed_spi_start_user(chip);
	aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
				op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
				    const struct spi_mem_op *op,
				    u64 offset, size_t len, void *buf)
{
	int io_mode = aspeed_spi_get_io_mode(op);
	u8 dummy = 0xFF;
	int i;
	int ret;

	aspeed_spi_start_user(chip);

	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
	if (ret < 0)
		goto stop_user;

	if (op->dummy.buswidth && op->dummy.nbytes) {
		for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
			aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
	}

	aspeed_spi_set_io_mode(chip, io_mode);

	aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
stop_user:
	aspeed_spi_stop_user(chip);
	return ret;
}

static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
				     const struct spi_mem_op *op)
{
	int ret;

	aspeed_spi_start_user(chip);
	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
	if (ret < 0)
		goto stop_user;
	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
stop_user:
	aspeed_spi_stop_user(chip);
	return ret;
}

/* support for 1-1-1, 1-1-2 or 1-1-4 */
static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (op->cmd.buswidth > 1)
		return false;

	if (op->addr.nbytes != 0) {
		if (op->addr.buswidth > 1)
			return false;
		if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
			return false;
	}

	if (op->dummy.nbytes != 0) {
		if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
			return false;
	}

	if (op->data.nbytes != 0 && op->data.buswidth > 4)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static const struct aspeed_spi_data ast2400_spi_data;

static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
	struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
	u32 addr_mode, addr_mode_backup;
	u32 ctl_val;
	int ret = 0;

	addr_mode = readl(aspi->regs + CE_CTRL_REG);
	addr_mode_backup = addr_mode;

	ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
	ctl_val &= ~CTRL_IO_CMD_MASK;

	ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;

	/* 4BYTE address mode */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4)
			addr_mode |= (0x11 << chip->cs);
		else
			addr_mode &= ~(0x11 << chip->cs);

		if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
			ctl_val |= CTRL_IO_ADDRESS_4B;
	}

	if (op->dummy.nbytes)
		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);

	if (op->data.nbytes)
		ctl_val |= aspeed_spi_get_io_mode(op);

	if (op->data.dir == SPI_MEM_DATA_OUT)
		ctl_val |= CTRL_IO_MODE_WRITE;
	else
		ctl_val |= CTRL_IO_MODE_READ;

	if (addr_mode != addr_mode_backup)
		writel(addr_mode, aspi->regs + CE_CTRL_REG);
	writel(ctl_val, chip->ctl);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!op->addr.nbytes)
			ret = aspeed_spi_read_reg(chip, op);
		else
			ret = aspeed_spi_read_user(chip, op, op->addr.val,
						   op->data.nbytes, op->data.buf.in);
	} else {
		if (!op->addr.nbytes)
			ret = aspeed_spi_write_reg(chip, op);
		else
			ret = aspeed_spi_write_user(chip, op);
	}

	/* Restore defaults */
	if (addr_mode != addr_mode_backup)
		writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
	return ret;
}

static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = do_aspeed_spi_exec_op(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
	return ret;
}

static const char *aspeed_spi_get_name(struct spi_mem *mem)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = aspi->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      spi_get_chipselect(mem->spi, 0));
}

struct aspeed_spi_window {
	u32 cs;
	u32 offset;
	u32 size;
};

static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
				   struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
{
	const struct aspeed_spi_data *data = aspi->data;
	u32 reg_val;
	u32 cs;

	for (cs = 0; cs < aspi->data->max_cs; cs++) {
		reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
		windows[cs].cs = cs;
		windows[cs].size = data->segment_end(aspi, reg_val) -
			data->segment_start(aspi, reg_val);
		windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
		dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
			 windows[cs].offset, windows[cs].size);
	}
}

/*
 * On the AST2600, some CE windows are closed by default at reset, but
 * U-Boot should open all of them.
 */
static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
{
	struct aspeed_spi *aspi = chip->aspi;
	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
	struct aspeed_spi_window *win = &windows[chip->cs];

	/* No segment registers for the AST2400 SPI controller */
	if (aspi->data == &ast2400_spi_data) {
		win->offset = 0;
		win->size = aspi->ahb_window_size;
	} else {
		aspeed_spi_get_windows(aspi, windows);
	}

	chip->ahb_base = aspi->ahb_base + win->offset;
	chip->ahb_window_size = win->size;

	dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
		chip->cs, aspi->ahb_base_phy + win->offset,
		aspi->ahb_base_phy + win->offset + win->size - 1,
		win->size >> 20);

	return chip->ahb_window_size ? 0 : -1;
}

static int aspeed_spi_set_window(struct aspeed_spi *aspi,
				 const struct aspeed_spi_window *win)
{
	u32 start = aspi->ahb_base_phy + win->offset;
	u32 end = start + win->size;
	void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
	u32 seg_val_backup = readl(seg_reg);
	u32 seg_val = aspi->data->segment_reg(aspi, start, end);

	if (seg_val == seg_val_backup)
		return 0;

	writel(seg_val, seg_reg);

	/*
	 * Restore the initial value if something goes wrong, else we
	 * could lose access to the chip.
	 */
	if (seg_val != readl(seg_reg)) {
		dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
			win->cs, start, end - 1, win->size >> 20);
		writel(seg_val_backup, seg_reg);
		return -EIO;
	}

	if (win->size)
		dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
			win->cs, start, end - 1, win->size >> 20);
	else
		dev_dbg(aspi->dev, "CE%d window closed", win->cs);

	return 0;
}

/*
 * Yet to be done when possible:
 * - Align mappings on flash size (we don't have the info)
 * - ioremap each window, not strictly necessary since the overall window
 *   is correct.
 */
static const struct aspeed_spi_data ast2500_spi_data;
static const struct aspeed_spi_data ast2600_spi_data;
static const struct aspeed_spi_data ast2600_fmc_data;

static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
					 u32 local_offset, u32 size)
{
	struct aspeed_spi *aspi = chip->aspi;
	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
	struct aspeed_spi_window *win = &windows[chip->cs];
	int ret;

	/* No segment registers for the AST2400 SPI controller */
	if (aspi->data == &ast2400_spi_data)
		return 0;

	/*
	 * Due to an HW issue on the AST2500 SPI controller, the CE0
	 * window size should be smaller than the maximum 128MB.
	 */
	if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
		size = 120 << 20;
		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
			 chip->cs, size >> 20);
	}

	/*
	 * The decoding size of the AST2600 SPI controller should be set
	 * to at least 2MB.
	 */
	if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
	    size < SZ_2M) {
		size = SZ_2M;
		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
			 chip->cs, size >> 20);
	}

	aspeed_spi_get_windows(aspi, windows);

	/* Adjust this chip window */
	win->offset += local_offset;
	win->size = size;

	if (win->offset + win->size > aspi->ahb_window_size) {
		win->size = aspi->ahb_window_size - win->offset;
		dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
	}

	ret = aspeed_spi_set_window(aspi, win);
	if (ret)
		return ret;

	/* Update chip mapping info */
	chip->ahb_base = aspi->ahb_base + win->offset;
	chip->ahb_window_size = win->size;

	/*
	 * Also adjust the next chip window to make sure that it does
	 * not overlap with the current window.
	 */
	if (chip->cs < aspi->data->max_cs - 1) {
		struct aspeed_spi_window *next = &windows[chip->cs + 1];

		/* Change offset and size to keep the same end address */
		if ((next->offset + next->size) > (win->offset + win->size))
			next->size = (next->offset + next->size) - (win->offset + win->size);
		else
			next->size = 0;
		next->offset = win->offset + win->size;

		aspeed_spi_set_window(aspi, next);
	}
	return 0;
}

static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);

static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
	struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
	struct spi_mem_op *op = &desc->info.op_tmpl;
	u32 ctl_val;
	int ret = 0;

	dev_dbg(aspi->dev,
		"CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
		desc->info.offset, desc->info.offset + desc->info.length,
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.nbytes, op->dummy.nbytes);

	chip->clk_freq = desc->mem->spi->max_speed_hz;

	/* Only for reads */
	if (op->data.dir != SPI_MEM_DATA_IN)
		return -EOPNOTSUPP;

	aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);

	if (desc->info.length > chip->ahb_window_size)
		dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
			 chip->cs, chip->ahb_window_size >> 20);

	/* Define the default IO read settings */
	ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
	ctl_val |= aspeed_spi_get_io_mode(op) |
		op->cmd.opcode << CTRL_COMMAND_SHIFT |
		CTRL_IO_MODE_READ;

	if (op->dummy.nbytes)
		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);

	/* Tune 4BYTE address mode */
	if (op->addr.nbytes) {
		u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);

		if (op->addr.nbytes == 4)
			addr_mode |= (0x11 << chip->cs);
		else
			addr_mode &= ~(0x11 << chip->cs);
		writel(addr_mode, aspi->regs + CE_CTRL_REG);

		/* AST2400 SPI controller sets 4BYTE address mode in
		 * CE0 Control Register
		 */
		if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
			ctl_val |= CTRL_IO_ADDRESS_4B;
	}

	/* READ mode is the controller default setting */
	chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);

	ret = aspeed_spi_do_calibration(chip);

	dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
		 chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);

	return ret;
}

static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offset, size_t len, void *buf)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
	struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];

	/* Switch to USER command mode if mapping window is too small */
	if (chip->ahb_window_size < offset + len) {
		int ret;

		ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
		if (ret < 0)
			return ret;
	} else {
		memcpy_fromio(buf, chip->ahb_base + offset, len);
	}

	return len;
}

static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
	.supports_op = aspeed_spi_supports_op,
	.exec_op = aspeed_spi_exec_op,
	.get_name = aspeed_spi_get_name,
	.dirmap_create = aspeed_spi_dirmap_create,
	.dirmap_read = aspeed_spi_dirmap_read,
};

static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
{
	u32 reg;

	reg = readl(aspi->regs + CONFIG_REG);
	reg &= ~(0x3 << (cs * 2));
	reg |= type << (cs * 2);
	writel(reg, aspi->regs + CONFIG_REG);
}

static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
{
	u32 we_bit = BIT(aspi->data->we0 + cs);
	u32 reg = readl(aspi->regs + CONFIG_REG);

	if (enable)
		reg |= we_bit;
	else
		reg &= ~we_bit;
	writel(reg, aspi->regs + CONFIG_REG);
}

static int aspeed_spi_setup(struct spi_device *spi)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
	const struct aspeed_spi_data *data = aspi->data;
	unsigned int cs = spi_get_chipselect(spi, 0);
	struct aspeed_spi_chip *chip = &aspi->chips[cs];

	chip->aspi = aspi;
	chip->cs = cs;
	chip->ctl = aspi->regs + data->ctl0 + cs * 4;

	/* The driver only supports SPI type flash */
	if (data->hastype)
		aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);

	if (aspeed_spi_chip_set_default_window(chip) < 0) {
		dev_warn(aspi->dev, "CE%d window invalid", cs);
		return -EINVAL;
	}

	aspeed_spi_chip_enable(aspi, cs, true);

	chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;

	dev_dbg(aspi->dev, "CE%d setup done\n", cs);
	return 0;
}

static void aspeed_spi_cleanup(struct spi_device *spi)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
	unsigned int cs = spi_get_chipselect(spi, 0);

	aspeed_spi_chip_enable(aspi, cs, false);

	dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
}

static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
{
	int cs;

	for (cs = 0; cs < aspi->data->max_cs; cs++)
		aspeed_spi_chip_enable(aspi, cs, enable);
}

static int aspeed_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct aspeed_spi_data *data;
	struct spi_controller *ctlr;
	struct aspeed_spi *aspi;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	ctlr = devm_spi_alloc_host(dev, sizeof(*aspi));
	if (!ctlr)
		return -ENOMEM;

	aspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, aspi);
	aspi->data = data;
	aspi->dev = dev;

	aspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(aspi->regs))
		return PTR_ERR(aspi->regs);

	aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
	if (IS_ERR(aspi->ahb_base)) {
		dev_err(dev, "missing AHB mapping window\n");
		return PTR_ERR(aspi->ahb_base);
	}

	aspi->ahb_window_size = resource_size(res);
	aspi->ahb_base_phy = res->start;

	aspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(aspi->clk)) {
		dev_err(dev, "missing clock\n");
		return PTR_ERR(aspi->clk);
	}

	aspi->clk_freq = clk_get_rate(aspi->clk);
	if (!aspi->clk_freq) {
		dev_err(dev, "invalid clock\n");
		return -EINVAL;
	}

	/* IRQ is for DMA, which the driver doesn't support yet */

	ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
	ctlr->bus_num = pdev->id;
	ctlr->mem_ops = &aspeed_spi_mem_ops;
	ctlr->setup = aspeed_spi_setup;
	ctlr->cleanup = aspeed_spi_cleanup;
	ctlr->num_chipselect = data->max_cs;
	ctlr->dev.of_node = dev->of_node;

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret)
		dev_err(&pdev->dev, "spi_register_controller failed\n");

	return ret;
}

static void aspeed_spi_remove(struct platform_device *pdev)
{
	struct aspeed_spi *aspi = platform_get_drvdata(pdev);

	aspeed_spi_enable(aspi, false);
}

/*
 * AHB mappings
 */

/*
 * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
 * The address range is encoded with absolute addresses in the overall
 * mapping window.
 */
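/*
 * Register layout: bits [23:16] hold the window start and bits [31:24]
 * the window end, both as absolute addresses in 8MB units.
 */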
static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
{
	return ((reg >> 16) & 0xFF) << 23;
}

static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
{
	return ((reg >> 24) & 0xFF) << 23;
}

static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
{
	return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
}

/*
 * The Segment Registers of the AST2600 use a 1MB unit. The address
 * range is encoded with offsets in the overall mapping window.
 */

#define AST2600_SEG_ADDR_MASK	0x0ff00000
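/*
 * Register layout: bits [11:4] hold the start offset and bits [27:20]
 * the last 1MB block of the window, both relative to the AHB window.
 */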

static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
					    u32 reg)
{
	u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;

	return aspi->ahb_base_phy + start_offset;
}

static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
					  u32 reg)
{
	u32 end_offset = reg & AST2600_SEG_ADDR_MASK;

	/* segment is disabled */
	if (!end_offset)
		return aspi->ahb_base_phy;

	return aspi->ahb_base_phy + end_offset + 0x100000;
}

static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
					  u32 start, u32 end)
{
	/* disable zero size segments */
	if (start == end)
		return 0;

	return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
		((end - 1) & AST2600_SEG_ADDR_MASK);
}

/*
 * Read timing compensation sequences
 */

#define CALIBRATE_BUF_SIZE	SZ_16K

static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
				   const u8 *golden_buf, u8 *test_buf)
{
	int i;

	for (i = 0; i < 10; i++) {
		memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
		if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
#if defined(VERBOSE_DEBUG)
			print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE,
					     test_buf, 0x100);
#endif
			return false;
		}
	}
	return true;
}

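/*
 * Timing value for pass @i: bits [2:0] select the HCLK cycle delay
 * (i / 2) and bit 3 adds the extra 4ns input delay tried on even passes.
 */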
#define FREAD_TPASS(i)	(((i) / 2) | (((i) & 1) ? 0 : 8))

/*
 * The timing register is shared by all devices. Only update for CE0.
 */
static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
				const u8 *golden_buf, u8 *test_buf)
{
	struct aspeed_spi *aspi = chip->aspi;
	const struct aspeed_spi_data *data = aspi->data;
	int i;
	int good_pass = -1, pass_count = 0;
	u32 shift = (hdiv - 1) << 2;
	u32 mask = ~(0xfu << shift);
	u32 fread_timing_val = 0;

	/* Try HCLK delay 0..5, each one with/without delay and look for a
	 * good pair.
	 */
	for (i = 0; i < 12; i++) {
		bool pass;

		if (chip->cs == 0) {
			fread_timing_val &= mask;
			fread_timing_val |= FREAD_TPASS(i) << shift;
			writel(fread_timing_val, aspi->regs + data->timing);
		}
		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
		dev_dbg(aspi->dev,
			" * [%08x] %d HCLK delay, %dns DI delay : %s",
			fread_timing_val, i / 2, (i & 1) ? 0 : 4,
			pass ? "PASS" : "FAIL");
		if (pass) {
			pass_count++;
			if (pass_count == 3) {
				good_pass = i - 1;
				break;
			}
		} else {
			pass_count = 0;
		}
	}

	/* No good setting for this frequency */
	if (good_pass < 0)
		return -1;

	/* We have at least one pass of margin, let's use first pass */
	if (chip->cs == 0) {
		fread_timing_val &= mask;
		fread_timing_val |= FREAD_TPASS(good_pass) << shift;
		writel(fread_timing_val, aspi->regs + data->timing);
	}
	dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
		good_pass, fread_timing_val);
	return 0;
}

static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
{
	const u32 *tb32 = (const u32 *)test_buf;
	u32 i, cnt = 0;

	/* We check if we have enough words that are neither all 0
	 * nor all 1's so that the calibration can be considered valid.
	 *
	 * We use an arbitrary threshold of 64 for now.
	 */
	size >>= 2;
	for (i = 0; i < size; i++) {
		if (tb32[i] != 0 && tb32[i] != 0xffffffff)
			cnt++;
	}
	return cnt >= 64;
}

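/* HCLK divisor encodings written to the CTRL_FREQ_SEL field of the CE control register */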
static const u32 aspeed_spi_hclk_divs[] = {
	0xf, /* HCLK */
	0x7, /* HCLK/2 */
	0xe, /* HCLK/3 */
	0x6, /* HCLK/4 */
	0xd, /* HCLK/5 */
};

#define ASPEED_SPI_HCLK_DIV(i) \
	(aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)

static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
{
	struct aspeed_spi *aspi = chip->aspi;
	const struct aspeed_spi_data *data = aspi->data;
	u32 ahb_freq = aspi->clk_freq;
	u32 max_freq = chip->clk_freq;
	u32 ctl_val;
	u8 *golden_buf = NULL;
	u8 *test_buf = NULL;
	int i, rc, best_div = -1;

	dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
		ahb_freq / 1000000);

	/*
	 * Use a low frequency to read the golden data and to check
	 * that the calibration area is usable.
	 */
	ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
	writel(ctl_val, chip->ctl);

	test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
	if (!test_buf)
		return -ENOMEM;

	golden_buf = test_buf + CALIBRATE_BUF_SIZE;

	memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
	if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
		dev_info(aspi->dev, "Calibration area too uniform, using low speed");
		goto no_calib;
	}

#if defined(VERBOSE_DEBUG)
	print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE,
			     golden_buf, 0x100);
#endif

	/* Now we iterate the HCLK dividers until we find our breaking point */
	for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
		u32 tv, freq;

		freq = ahb_freq / i;
		if (freq > max_freq)
			continue;

		/* Set the timing */
		tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
		writel(tv, chip->ctl);
		dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
		rc = data->calibrate(chip, i, golden_buf, test_buf);
		if (rc == 0)
			best_div = i;
	}

	/* Nothing found? */
	if (best_div < 0) {
		dev_warn(aspi->dev, "No good frequency, using dumb slow");
	} else {
		dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);

		/* Record the freq */
		for (i = 0; i < ASPEED_SPI_MAX; i++)
			chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
				ASPEED_SPI_HCLK_DIV(best_div);
	}

no_calib:
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
	kfree(test_buf);
	return 0;
}

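/* The AST2600 has one read timing compensation register per CE */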
#define TIMING_DELAY_DI		BIT(3)
#define TIMING_DELAY_HCYCLE_MAX	5
#define TIMING_REG_AST2600(chip) \
	((chip)->aspi->regs + (chip)->aspi->data->timing + \
	 (chip)->cs * 4)

static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
					const u8 *golden_buf, u8 *test_buf)
{
	struct aspeed_spi *aspi = chip->aspi;
	int hcycle;
	u32 shift = (hdiv - 2) << 3;
	u32 mask = ~(0xfu << shift);
	u32 fread_timing_val = 0;

	for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
		int delay_ns;
		bool pass = false;

		fread_timing_val &= mask;
		fread_timing_val |= hcycle << shift;

		/* no DI input delay first */
		writel(fread_timing_val, TIMING_REG_AST2600(chip));
		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
		dev_dbg(aspi->dev,
			" * [%08x] %d HCLK delay, DI delay none : %s",
			fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
		if (pass)
			return 0;

		/* Add DI input delays */
		fread_timing_val &= mask;
		fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;

		for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
			fread_timing_val &= ~(0xf << (4 + shift));
			fread_timing_val |= delay_ns << (4 + shift);

			writel(fread_timing_val, TIMING_REG_AST2600(chip));
			pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
			dev_dbg(aspi->dev,
				" * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
				fread_timing_val, hcycle, (delay_ns + 1) / 2,
				(delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
			/*
			 * TODO: This is optimistic. We should look
			 * for a working interval and save the middle
			 * value in the read timing register.
			 */
			if (pass)
				return 0;
		}
	}

	/* No good setting for this frequency */
	return -1;
}

/*
 * Platform definitions
 */
static const struct aspeed_spi_data ast2400_fmc_data = {
	.max_cs = 5,
	.hastype = true,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xfffff0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end = aspeed_spi_segment_end,
	.segment_reg = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2400_spi_data = {
	.max_cs = 1,
	.hastype = false,
	.we0 = 0,
	.ctl0 = 0x04,
	.timing = 0x14,
	.hclk_mask = 0xfffff0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	/* No segment registers */
};

static const struct aspeed_spi_data ast2500_fmc_data = {
	.max_cs = 3,
	.hastype = true,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xffffd0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end = aspeed_spi_segment_end,
	.segment_reg = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2500_spi_data = {
	.max_cs = 2,
	.hastype = false,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xffffd0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end = aspeed_spi_segment_end,
	.segment_reg = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2600_fmc_data = {
	.max_cs = 3,
	.hastype = false,
	.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xf0fff0ff,
	.hdiv_max = 2,
	.calibrate = aspeed_spi_ast2600_calibrate,
	.segment_start = aspeed_spi_segment_ast2600_start,
	.segment_end = aspeed_spi_segment_ast2600_end,
	.segment_reg = aspeed_spi_segment_ast2600_reg,
};

static const struct aspeed_spi_data ast2600_spi_data = {
	.max_cs = 2,
	.hastype = false,
	.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xf0fff0ff,
	.hdiv_max = 2,
	.calibrate = aspeed_spi_ast2600_calibrate,
	.segment_start = aspeed_spi_segment_ast2600_start,
	.segment_end = aspeed_spi_segment_ast2600_end,
	.segment_reg = aspeed_spi_segment_ast2600_reg,
};

static const struct of_device_id aspeed_spi_matches[] = {
	{ .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
	{ .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
	{ .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
	{ .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
	{ .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
	{ .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_spi_matches);

static struct platform_driver aspeed_spi_driver = {
	.probe = aspeed_spi_probe,
	.remove = aspeed_spi_remove,
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = aspeed_spi_matches,
	}
};

module_platform_driver(aspeed_spi_driver);

MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>");
MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
MODULE_LICENSE("GPL v2");