1 /*
2  * Designware SPI core controller driver (refer pxa2xx_spi.c)
3  *
4  * Copyright (c) 2009, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  */
19 
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/highmem.h>
24 #include <linux/delay.h>
25 #include <linux/slab.h>
26 #include <linux/spi/spi.h>
27 
28 #include "spi-dw.h"
29 
30 #ifdef CONFIG_DEBUG_FS
31 #include <linux/debugfs.h>
32 #endif
33 
34 #define START_STATE	((void *)0)
35 #define RUNNING_STATE	((void *)1)
36 #define DONE_STATE	((void *)2)
37 #define ERROR_STATE	((void *)-1)
38 
39 #define QUEUE_RUNNING	0
40 #define QUEUE_STOPPED	1
41 
42 #define MRST_SPI_DEASSERT	0
43 #define MRST_SPI_ASSERT		1
44 
/* Slave spi_dev related */
struct chip_data {
	u16 cr0;		/* cached CTRL0 value for this slave */
	u8 cs;			/* chip select pin */
	u8 n_bytes;		/* current is a 1/2/4 byte op */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u32 dma_width;		/* DMA transfer width in bytes */
	u32 rx_threshold;	/* rx fifo interrupt threshold */
	u32 tx_threshold;	/* tx fifo interrupt threshold */
	u8 enable_dma;		/* 1 means slave wants DMA transfers */
	u8 bits_per_word;	/* 8 or 16 only (see dw_spi_setup) */
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);	/* optional board-specific CS hook */
};
64 
65 #ifdef CONFIG_DEBUG_FS
spi_show_regs_open(struct inode * inode,struct file * file)66 static int spi_show_regs_open(struct inode *inode, struct file *file)
67 {
68 	file->private_data = inode->i_private;
69 	return 0;
70 }
71 
72 #define SPI_REGS_BUFSIZE	1024
spi_show_regs(struct file * file,char __user * user_buf,size_t count,loff_t * ppos)73 static ssize_t  spi_show_regs(struct file *file, char __user *user_buf,
74 				size_t count, loff_t *ppos)
75 {
76 	struct dw_spi *dws;
77 	char *buf;
78 	u32 len = 0;
79 	ssize_t ret;
80 
81 	dws = file->private_data;
82 
83 	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
84 	if (!buf)
85 		return 0;
86 
87 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
88 			"MRST SPI0 registers:\n");
89 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
90 			"=================================\n");
91 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
92 			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
93 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
94 			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
95 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
96 			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
97 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
98 			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
99 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
100 			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
101 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
102 			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
103 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
104 			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
105 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
106 			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
107 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
108 			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
109 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
110 			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
111 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
112 			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
113 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
114 			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
115 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
116 			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
117 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
118 			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
119 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
120 			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
121 	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
122 			"=================================\n");
123 
124 	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
125 	kfree(buf);
126 	return ret;
127 }
128 
/* debugfs file ops exposing a one-shot dump of the controller registers */
static const struct file_operations mrst_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= spi_show_regs_open,
	.read		= spi_show_regs,
	.llseek		= default_llseek,
};
135 
mrst_spi_debugfs_init(struct dw_spi * dws)136 static int mrst_spi_debugfs_init(struct dw_spi *dws)
137 {
138 	dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
139 	if (!dws->debugfs)
140 		return -ENOMEM;
141 
142 	debugfs_create_file("registers", S_IFREG | S_IRUGO,
143 		dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
144 	return 0;
145 }
146 
mrst_spi_debugfs_remove(struct dw_spi * dws)147 static void mrst_spi_debugfs_remove(struct dw_spi *dws)
148 {
149 	if (dws->debugfs)
150 		debugfs_remove_recursive(dws->debugfs);
151 }
152 
153 #else
/* No-op stub when CONFIG_DEBUG_FS is disabled */
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}
158 
/* No-op stub when CONFIG_DEBUG_FS is disabled */
static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
}
162 #endif /* CONFIG_DEBUG_FS */
163 
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	/* Entries still to be sent, and free slots left in the tx fifo */
	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is about the tx/rx mismatch, we
	 * though to use (dws->fifo_len - rxflr - txflr) as
	 * one maximum value for tx, but it doesn't cover the
	 * data which is out of tx/rx fifo and inside the
	 * shift registers. So a control from sw point of
	 * view is taken.
	 */
	rxtx_gap =  ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	/* Cap tx so it never runs more than fifo_len entries ahead of rx */
	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}
185 
186 /* Return the max entries we should read out of rx fifo */
rx_max(struct dw_spi * dws)187 static inline u32 rx_max(struct dw_spi *dws)
188 {
189 	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
190 
191 	return min(rx_left, (u32)dw_readw(dws, DW_SPI_RXFLR));
192 }
193 
/* Push as many tx words into the fifo as tx_max() allows. */
static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u16 txw = 0;

	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		/*
		 * pump_transfers() sets dws->tx_end = dws->tx + len; when
		 * tx_buf was NULL, dws->tx is NULL and (tx_end - len) == 0,
		 * so this test skips the load and clocks out zeros instead.
		 */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_writew(dws, DW_SPI_DR, txw);
		/* Advance even in the tx-less case to keep the count right */
		dws->tx += dws->n_bytes;
	}
}
211 
/* Drain as many rx words from the fifo as rx_max() allows. */
static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u16 rxw;

	while (max--) {
		rxw = dw_readw(dws, DW_SPI_DR);
		/* Care rx only if the transfer's original "rx" is not null */
		/*
		 * Mirror of the dw_writer() trick: when rx_buf was NULL,
		 * dws->rx is NULL and (rx_end - len) == 0, so the word is
		 * popped from the fifo but discarded.
		 */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
}
229 
next_transfer(struct dw_spi * dws)230 static void *next_transfer(struct dw_spi *dws)
231 {
232 	struct spi_message *msg = dws->cur_msg;
233 	struct spi_transfer *trans = dws->cur_transfer;
234 
235 	/* Move to next transfer */
236 	if (trans->transfer_list.next != &msg->transfers) {
237 		dws->cur_transfer =
238 			list_entry(trans->transfer_list.next,
239 					struct spi_transfer,
240 					transfer_list);
241 		return RUNNING_STATE;
242 	} else
243 		return DONE_STATE;
244 }
245 
246 /*
247  * Note: first step is the protocol driver prepares
248  * a dma-capable memory, and this func just need translate
249  * the virt addr to physical
250  */
map_dma_buffers(struct dw_spi * dws)251 static int map_dma_buffers(struct dw_spi *dws)
252 {
253 	if (!dws->cur_msg->is_dma_mapped
254 		|| !dws->dma_inited
255 		|| !dws->cur_chip->enable_dma
256 		|| !dws->dma_ops)
257 		return 0;
258 
259 	if (dws->cur_transfer->tx_dma)
260 		dws->tx_dma = dws->cur_transfer->tx_dma;
261 
262 	if (dws->cur_transfer->rx_dma)
263 		dws->rx_dma = dws->cur_transfer->rx_dma;
264 
265 	return 1;
266 }
267 
/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	/*
	 * Detach the finished message from the driver state under the
	 * lock and kick the message pump so the next queued message
	 * can start.
	 */
	spin_lock_irqsave(&dws->lock, flags);
	msg = dws->cur_msg;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->prev_chip = dws->cur_chip;
	dws->cur_chip = NULL;
	dws->dma_mapped = 0;
	queue_work(dws->workqueue, &dws->pump_messages);
	spin_unlock_irqrestore(&dws->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Drop chip select unless the last transfer asked to keep it */
	if (!last_transfer->cs_change && dws->cs_control)
		dws->cs_control(MRST_SPI_DEASSERT);

	/* Notify the protocol driver; msg may be freed inside complete() */
	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
}
296 
int_error_stop(struct dw_spi * dws,const char * msg)297 static void int_error_stop(struct dw_spi *dws, const char *msg)
298 {
299 	/* Stop the hw */
300 	spi_enable_chip(dws, 0);
301 
302 	dev_err(&dws->master->dev, "%s\n", msg);
303 	dws->cur_msg->state = ERROR_STATE;
304 	tasklet_schedule(&dws->pump_transfers);
305 }
306 
/*
 * Called once the current transfer's data has been fully exchanged
 * (from irq, poll and DMA completion paths): account the bytes,
 * advance to the next transfer or finish the message.
 */
void dw_spi_xfer_done(struct dw_spi *dws)
{
	/* Update total byte transferred return count actual bytes read */
	dws->cur_msg->actual_length += dws->len;

	/* Move to next transfer */
	dws->cur_msg->state = next_transfer(dws);

	/* Handle end of message */
	if (dws->cur_msg->state == DONE_STATE) {
		dws->cur_msg->status = 0;
		giveback(dws);
	} else
		tasklet_schedule(&dws->pump_transfers);
}
EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
323 
/* PIO interrupt-mode handler: drain the rx fifo, then refill tx. */
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readw(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		/* Reading the *ICR registers clears the error conditions */
		dw_readw(dws, DW_SPI_TXOICR);
		dw_readw(dws, DW_SPI_RXOICR);
		dw_readw(dws, DW_SPI_RXUICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	/* All expected rx data has arrived: the transfer is complete */
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_spi_xfer_done(dws);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		/* Mask TXE while refilling so we don't re-enter mid-fill */
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Enable TX irq always, it will be disabled when RX finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}
352 
dw_spi_irq(int irq,void * dev_id)353 static irqreturn_t dw_spi_irq(int irq, void *dev_id)
354 {
355 	struct dw_spi *dws = dev_id;
356 	u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f;
357 
358 	if (!irq_status)
359 		return IRQ_NONE;
360 
361 	if (!dws->cur_msg) {
362 		spi_mask_intr(dws, SPI_INT_TXEI);
363 		return IRQ_HANDLED;
364 	}
365 
366 	return dws->transfer_handler(dws);
367 }
368 
369 /* Must be called inside pump_transfers() */
poll_transfer(struct dw_spi * dws)370 static void poll_transfer(struct dw_spi *dws)
371 {
372 	do {
373 		dw_writer(dws);
374 		dw_reader(dws);
375 		cpu_relax();
376 	} while (dws->rx_end > dws->rx);
377 
378 	dw_spi_xfer_done(dws);
379 }
380 
pump_transfers(unsigned long data)381 static void pump_transfers(unsigned long data)
382 {
383 	struct dw_spi *dws = (struct dw_spi *)data;
384 	struct spi_message *message = NULL;
385 	struct spi_transfer *transfer = NULL;
386 	struct spi_transfer *previous = NULL;
387 	struct spi_device *spi = NULL;
388 	struct chip_data *chip = NULL;
389 	u8 bits = 0;
390 	u8 imask = 0;
391 	u8 cs_change = 0;
392 	u16 txint_level = 0;
393 	u16 clk_div = 0;
394 	u32 speed = 0;
395 	u32 cr0 = 0;
396 
397 	/* Get current state information */
398 	message = dws->cur_msg;
399 	transfer = dws->cur_transfer;
400 	chip = dws->cur_chip;
401 	spi = message->spi;
402 
403 	if (unlikely(!chip->clk_div))
404 		chip->clk_div = dws->max_freq / chip->speed_hz;
405 
406 	if (message->state == ERROR_STATE) {
407 		message->status = -EIO;
408 		goto early_exit;
409 	}
410 
411 	/* Handle end of message */
412 	if (message->state == DONE_STATE) {
413 		message->status = 0;
414 		goto early_exit;
415 	}
416 
417 	/* Delay if requested at end of transfer*/
418 	if (message->state == RUNNING_STATE) {
419 		previous = list_entry(transfer->transfer_list.prev,
420 					struct spi_transfer,
421 					transfer_list);
422 		if (previous->delay_usecs)
423 			udelay(previous->delay_usecs);
424 	}
425 
426 	dws->n_bytes = chip->n_bytes;
427 	dws->dma_width = chip->dma_width;
428 	dws->cs_control = chip->cs_control;
429 
430 	dws->rx_dma = transfer->rx_dma;
431 	dws->tx_dma = transfer->tx_dma;
432 	dws->tx = (void *)transfer->tx_buf;
433 	dws->tx_end = dws->tx + transfer->len;
434 	dws->rx = transfer->rx_buf;
435 	dws->rx_end = dws->rx + transfer->len;
436 	dws->cs_change = transfer->cs_change;
437 	dws->len = dws->cur_transfer->len;
438 	if (chip != dws->prev_chip)
439 		cs_change = 1;
440 
441 	cr0 = chip->cr0;
442 
443 	/* Handle per transfer options for bpw and speed */
444 	if (transfer->speed_hz) {
445 		speed = chip->speed_hz;
446 
447 		if (transfer->speed_hz != speed) {
448 			speed = transfer->speed_hz;
449 			if (speed > dws->max_freq) {
450 				printk(KERN_ERR "MRST SPI0: unsupported"
451 					"freq: %dHz\n", speed);
452 				message->status = -EIO;
453 				goto early_exit;
454 			}
455 
456 			/* clk_div doesn't support odd number */
457 			clk_div = dws->max_freq / speed;
458 			clk_div = (clk_div + 1) & 0xfffe;
459 
460 			chip->speed_hz = speed;
461 			chip->clk_div = clk_div;
462 		}
463 	}
464 	if (transfer->bits_per_word) {
465 		bits = transfer->bits_per_word;
466 
467 		switch (bits) {
468 		case 8:
469 		case 16:
470 			dws->n_bytes = dws->dma_width = bits >> 3;
471 			break;
472 		default:
473 			printk(KERN_ERR "MRST SPI0: unsupported bits:"
474 				"%db\n", bits);
475 			message->status = -EIO;
476 			goto early_exit;
477 		}
478 
479 		cr0 = (bits - 1)
480 			| (chip->type << SPI_FRF_OFFSET)
481 			| (spi->mode << SPI_MODE_OFFSET)
482 			| (chip->tmode << SPI_TMOD_OFFSET);
483 	}
484 	message->state = RUNNING_STATE;
485 
486 	/*
487 	 * Adjust transfer mode if necessary. Requires platform dependent
488 	 * chipselect mechanism.
489 	 */
490 	if (dws->cs_control) {
491 		if (dws->rx && dws->tx)
492 			chip->tmode = SPI_TMOD_TR;
493 		else if (dws->rx)
494 			chip->tmode = SPI_TMOD_RO;
495 		else
496 			chip->tmode = SPI_TMOD_TO;
497 
498 		cr0 &= ~SPI_TMOD_MASK;
499 		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
500 	}
501 
502 	/* Check if current transfer is a DMA transaction */
503 	dws->dma_mapped = map_dma_buffers(dws);
504 
505 	/*
506 	 * Interrupt mode
507 	 * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
508 	 */
509 	if (!dws->dma_mapped && !chip->poll_mode) {
510 		int templen = dws->len / dws->n_bytes;
511 		txint_level = dws->fifo_len / 2;
512 		txint_level = (templen > txint_level) ? txint_level : templen;
513 
514 		imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
515 		dws->transfer_handler = interrupt_transfer;
516 	}
517 
518 	/*
519 	 * Reprogram registers only if
520 	 *	1. chip select changes
521 	 *	2. clk_div is changed
522 	 *	3. control value changes
523 	 */
524 	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
525 		spi_enable_chip(dws, 0);
526 
527 		if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
528 			dw_writew(dws, DW_SPI_CTRL0, cr0);
529 
530 		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
531 		spi_chip_sel(dws, spi->chip_select);
532 
533 		/* Set the interrupt mask, for poll mode just disable all int */
534 		spi_mask_intr(dws, 0xff);
535 		if (imask)
536 			spi_umask_intr(dws, imask);
537 		if (txint_level)
538 			dw_writew(dws, DW_SPI_TXFLTR, txint_level);
539 
540 		spi_enable_chip(dws, 1);
541 		if (cs_change)
542 			dws->prev_chip = chip;
543 	}
544 
545 	if (dws->dma_mapped)
546 		dws->dma_ops->dma_transfer(dws, cs_change);
547 
548 	if (chip->poll_mode)
549 		poll_transfer(dws);
550 
551 	return;
552 
553 early_exit:
554 	giveback(dws);
555 	return;
556 }
557 
/*
 * Workqueue handler: pull the next spi_message off the queue and
 * hand it to the pump_transfers tasklet.  Runs in process context.
 */
static void pump_messages(struct work_struct *work)
{
	struct dw_spi *dws =
		container_of(work, struct dw_spi, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&dws->lock, flags);
	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
		dws->busy = 0;
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (dws->cur_msg) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Extract head of queue */
	dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
	list_del_init(&dws->cur_msg->queue);

	/* Initial message state*/
	dws->cur_msg->state = START_STATE;
	dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);
	/* Per-slave config was stored by dw_spi_setup() */
	dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&dws->pump_transfers);

	dws->busy = 1;
	spin_unlock_irqrestore(&dws->lock, flags);
}
595 
/* spi_device use this to queue in their spi_msg */
static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	/* Refuse new messages while the queue is stopped/tearing down */
	if (dws->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	list_add_tail(&msg->queue, &dws->queue);

	if (dws->run == QUEUE_RUNNING && !dws->busy) {

		if (dws->cur_transfer || dws->cur_msg)
			queue_work(dws->workqueue,
					&dws->pump_messages);
		else {
			/* If no other data transaction in air, just go */
			/* Must drop the lock: pump_messages() retakes it */
			spin_unlock_irqrestore(&dws->lock, flags);
			pump_messages(&dws->pump_messages);
			return 0;
		}
	}

	spin_unlock_irqrestore(&dws->lock, flags);
	return 0;
}
631 
632 /* This may be called twice for each spi dev */
dw_spi_setup(struct spi_device * spi)633 static int dw_spi_setup(struct spi_device *spi)
634 {
635 	struct dw_spi_chip *chip_info = NULL;
636 	struct chip_data *chip;
637 
638 	if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
639 		return -EINVAL;
640 
641 	/* Only alloc on first setup */
642 	chip = spi_get_ctldata(spi);
643 	if (!chip) {
644 		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
645 		if (!chip)
646 			return -ENOMEM;
647 	}
648 
649 	/*
650 	 * Protocol drivers may change the chip settings, so...
651 	 * if chip_info exists, use it
652 	 */
653 	chip_info = spi->controller_data;
654 
655 	/* chip_info doesn't always exist */
656 	if (chip_info) {
657 		if (chip_info->cs_control)
658 			chip->cs_control = chip_info->cs_control;
659 
660 		chip->poll_mode = chip_info->poll_mode;
661 		chip->type = chip_info->type;
662 
663 		chip->rx_threshold = 0;
664 		chip->tx_threshold = 0;
665 
666 		chip->enable_dma = chip_info->enable_dma;
667 	}
668 
669 	if (spi->bits_per_word <= 8) {
670 		chip->n_bytes = 1;
671 		chip->dma_width = 1;
672 	} else if (spi->bits_per_word <= 16) {
673 		chip->n_bytes = 2;
674 		chip->dma_width = 2;
675 	} else {
676 		/* Never take >16b case for MRST SPIC */
677 		dev_err(&spi->dev, "invalid wordsize\n");
678 		return -EINVAL;
679 	}
680 	chip->bits_per_word = spi->bits_per_word;
681 
682 	if (!spi->max_speed_hz) {
683 		dev_err(&spi->dev, "No max speed HZ parameter\n");
684 		return -EINVAL;
685 	}
686 	chip->speed_hz = spi->max_speed_hz;
687 
688 	chip->tmode = 0; /* Tx & Rx */
689 	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
690 	chip->cr0 = (chip->bits_per_word - 1)
691 			| (chip->type << SPI_FRF_OFFSET)
692 			| (spi->mode  << SPI_MODE_OFFSET)
693 			| (chip->tmode << SPI_TMOD_OFFSET);
694 
695 	spi_set_ctldata(spi, chip);
696 	return 0;
697 }
698 
dw_spi_cleanup(struct spi_device * spi)699 static void dw_spi_cleanup(struct spi_device *spi)
700 {
701 	struct chip_data *chip = spi_get_ctldata(spi);
702 	kfree(chip);
703 }
704 
init_queue(struct dw_spi * dws)705 static int __devinit init_queue(struct dw_spi *dws)
706 {
707 	INIT_LIST_HEAD(&dws->queue);
708 	spin_lock_init(&dws->lock);
709 
710 	dws->run = QUEUE_STOPPED;
711 	dws->busy = 0;
712 
713 	tasklet_init(&dws->pump_transfers,
714 			pump_transfers,	(unsigned long)dws);
715 
716 	INIT_WORK(&dws->pump_messages, pump_messages);
717 	dws->workqueue = create_singlethread_workqueue(
718 					dev_name(dws->master->dev.parent));
719 	if (dws->workqueue == NULL)
720 		return -EBUSY;
721 
722 	return 0;
723 }
724 
start_queue(struct dw_spi * dws)725 static int start_queue(struct dw_spi *dws)
726 {
727 	unsigned long flags;
728 
729 	spin_lock_irqsave(&dws->lock, flags);
730 
731 	if (dws->run == QUEUE_RUNNING || dws->busy) {
732 		spin_unlock_irqrestore(&dws->lock, flags);
733 		return -EBUSY;
734 	}
735 
736 	dws->run = QUEUE_RUNNING;
737 	dws->cur_msg = NULL;
738 	dws->cur_transfer = NULL;
739 	dws->cur_chip = NULL;
740 	dws->prev_chip = NULL;
741 	spin_unlock_irqrestore(&dws->lock, flags);
742 
743 	queue_work(dws->workqueue, &dws->pump_messages);
744 
745 	return 0;
746 }
747 
/*
 * Stop accepting new messages and wait (up to ~500 ms) for the queue
 * to drain and any in-flight message to finish.  Returns -EBUSY if
 * work is still pending after the timeout.
 */
static int stop_queue(struct dw_spi *dws)
{
	unsigned long flags;
	unsigned limit = 50;
	int status = 0;

	spin_lock_irqsave(&dws->lock, flags);
	dws->run = QUEUE_STOPPED;
	/* Drop the lock while sleeping so the pump can make progress */
	while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
		spin_unlock_irqrestore(&dws->lock, flags);
		msleep(10);
		spin_lock_irqsave(&dws->lock, flags);
	}

	if (!list_empty(&dws->queue) || dws->busy)
		status = -EBUSY;
	spin_unlock_irqrestore(&dws->lock, flags);

	return status;
}
768 
destroy_queue(struct dw_spi * dws)769 static int destroy_queue(struct dw_spi *dws)
770 {
771 	int status;
772 
773 	status = stop_queue(dws);
774 	if (status != 0)
775 		return status;
776 	destroy_workqueue(dws->workqueue);
777 	return 0;
778 }
779 
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;
		/*
		 * Probe by writing increasing thresholds to TXFLTR until
		 * the readback stops matching.
		 * NOTE(review): the detected value may be off by one
		 * depending on the valid TXFLTR range for this IP version —
		 * verify against the DW_apb_ssi databook.
		 */
		for (fifo = 2; fifo <= 257; fifo++) {
			dw_writew(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
				break;
		}

		/* fifo == 257 means every write stuck: detection failed */
		dws->fifo_len = (fifo == 257) ? 0 : fifo;
		dw_writew(dws, DW_SPI_TXFLTR, 0);
	}
}
803 
dw_spi_add_host(struct dw_spi * dws)804 int __devinit dw_spi_add_host(struct dw_spi *dws)
805 {
806 	struct spi_master *master;
807 	int ret;
808 
809 	BUG_ON(dws == NULL);
810 
811 	master = spi_alloc_master(dws->parent_dev, 0);
812 	if (!master) {
813 		ret = -ENOMEM;
814 		goto exit;
815 	}
816 
817 	dws->master = master;
818 	dws->type = SSI_MOTO_SPI;
819 	dws->prev_chip = NULL;
820 	dws->dma_inited = 0;
821 	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
822 	snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
823 			dws->bus_num);
824 
825 	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
826 			dws->name, dws);
827 	if (ret < 0) {
828 		dev_err(&master->dev, "can not get IRQ\n");
829 		goto err_free_master;
830 	}
831 
832 	master->mode_bits = SPI_CPOL | SPI_CPHA;
833 	master->bus_num = dws->bus_num;
834 	master->num_chipselect = dws->num_cs;
835 	master->cleanup = dw_spi_cleanup;
836 	master->setup = dw_spi_setup;
837 	master->transfer = dw_spi_transfer;
838 
839 	/* Basic HW init */
840 	spi_hw_init(dws);
841 
842 	if (dws->dma_ops && dws->dma_ops->dma_init) {
843 		ret = dws->dma_ops->dma_init(dws);
844 		if (ret) {
845 			dev_warn(&master->dev, "DMA init failed\n");
846 			dws->dma_inited = 0;
847 		}
848 	}
849 
850 	/* Initial and start queue */
851 	ret = init_queue(dws);
852 	if (ret) {
853 		dev_err(&master->dev, "problem initializing queue\n");
854 		goto err_diable_hw;
855 	}
856 	ret = start_queue(dws);
857 	if (ret) {
858 		dev_err(&master->dev, "problem starting queue\n");
859 		goto err_diable_hw;
860 	}
861 
862 	spi_master_set_devdata(master, dws);
863 	ret = spi_register_master(master);
864 	if (ret) {
865 		dev_err(&master->dev, "problem registering spi master\n");
866 		goto err_queue_alloc;
867 	}
868 
869 	mrst_spi_debugfs_init(dws);
870 	return 0;
871 
872 err_queue_alloc:
873 	destroy_queue(dws);
874 	if (dws->dma_ops && dws->dma_ops->dma_exit)
875 		dws->dma_ops->dma_exit(dws);
876 err_diable_hw:
877 	spi_enable_chip(dws, 0);
878 	free_irq(dws->irq, dws);
879 err_free_master:
880 	spi_master_put(master);
881 exit:
882 	return ret;
883 }
884 EXPORT_SYMBOL_GPL(dw_spi_add_host);
885 
/* Tear down everything set up by dw_spi_add_host(). */
void __devexit dw_spi_remove_host(struct dw_spi *dws)
{
	int status = 0;

	if (!dws)
		return;
	mrst_spi_debugfs_remove(dws);

	/* Remove the queue */
	status = destroy_queue(dws);
	if (status != 0)
		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
			"complete, message memory not freed\n");

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	/* Disable clk */
	spi_set_clk(dws, 0);
	free_irq(dws->irq, dws);

	/* Disconnect from the SPI framework */
	spi_unregister_master(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
911 
/* Suspend hook for interface drivers: drain queue, then power down. */
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	/* Bail out if the queue refuses to drain */
	ret = stop_queue(dws);
	if (ret)
		return ret;

	/* Quiesce the controller and gate its clock */
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
924 
dw_spi_resume_host(struct dw_spi * dws)925 int dw_spi_resume_host(struct dw_spi *dws)
926 {
927 	int ret;
928 
929 	spi_hw_init(dws);
930 	ret = start_queue(dws);
931 	if (ret)
932 		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
933 	return ret;
934 }
935 EXPORT_SYMBOL_GPL(dw_spi_resume_host);
936 
937 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
938 MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
939 MODULE_LICENSE("GPL v2");
940