/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to catch completion
 * timeouts, and returns. For the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e., if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine to the
 * bottom-half waiting state.
 */
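
/*
 * As an illustration (derived from the handlers below), a multi-block read
 * typically runs as:
 *
 *   sh_mmcif_request() -> sh_mmcif_start_cmd()        top half, returns
 *   IRQ: sh_mmcif_intr() -> sh_mmcif_irqt()           command response
 *     -> sh_mmcif_end_cmd() -> sh_mmcif_multi_read()  next top half
 *   IRQ: ... -> sh_mmcif_mread_block()                once per block
 *   IRQ: ... -> sh_mmcif_stop_cmd()                   if mrq->stop is set
 *   IRQ: ... -> mmc_request_done()                    request finished
 *
 * A timeout at any stage is handled by mmcif_timeout_work() instead.
 */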

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: do not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
};

enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};
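
/*
 * mmcif_wait_for records which event the state machine is waiting for: it
 * selects the stage-specific handler in the switch in sh_mmcif_irqt() and
 * tells mmcif_timeout_work() which part of the request to fail on timeout.
 */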

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct sh_dmae_slave dma_slave_tx;
	struct sh_dmae_slave dma_slave_rx;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_data *data = host->mrq->data;

	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
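	/* Hand the sh_dmae_slave configuration to the DMA engine driver */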
	chan->private = arg;
	return true;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	struct sh_dmae_slave *tx, *rx;
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dev_warn(&host->pd->dev,
			 "Update your platform to use embedded DMA slave IDs\n");
		tx = &pdata->dma->chan_priv_tx;
		rx = &pdata->dma->chan_priv_rx;
	} else {
		tx = &host->dma_slave_tx;
		tx->slave_id = pdata->slave_id_tx;
		rx = &host->dma_slave_rx;
		rx->slave_id = pdata->slave_id_rx;
	}
	if (tx->slave_id > 0 && rx->slave_id > 0) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		init_completion(&host->dma_complete);
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
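	/*
	 * Either drive the card directly from the peripheral clock
	 * (sup_pclk), or program the divider field masked by CLK_CLEAR:
	 * fls(host->clk / clk) - 1 is floor(log2(ratio)), which (assuming
	 * the usual MMCIF power-of-two divider encoding) keeps the bus
	 * clock at or below the requested rate.
	 */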
	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(host->clk / clk) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

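	/*
	 * 0x010f0000 preserves the previously programmed clock bits of
	 * CE_CLK_CTRL across the soft reset; they are restored below,
	 * together with the response/busy/data timeout settings.
	 */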
	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_dbg(&host->pd->dev, ": CRC error\n");
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_dbg(&host->pd->dev, ": Timeout\n");
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, ": End/Index error\n");
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

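	/*
	 * PIO bookkeeping: advance by one block within the current
	 * scatterlist entry, and step to the next entry once this one is
	 * fully consumed. Returns true while more data remains.
	 */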
	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	if (host->sg_idx == data->sg_len)
		return false;

	return true;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

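	/* The command index lives in CMD_SET[29:24] (cf. CMD_MASK above) */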
	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;

	switch (opc) {
	/* response busy check */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

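	/*
	 * 0xD80430C0 has 1s only in the reserved bit positions of CE_INT;
	 * since CE_INT flags are cleared by writing 0 (see the ~flag writes
	 * in sh_mmcif_intr()), this clears every interrupt flag defined
	 * above before the command is started.
	 */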
	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case SD_IO_SEND_OP_COND:
	case MMC_APP_CMD:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if (!mrq->data) {
			/* send_if_cond cmd (not supported) */
			host->state = STATE_IDLE;
			mrq->cmd->error = -ETIMEDOUT;
			mmc_request_done(mmc, mrq);
			return;
		}
		break;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put(&host->pd->dev);
			host->power = false;
			if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
				p->down_pwr(host->pd);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			if (p->set_pwr)
				p->set_pwr(host->pd, ios->power_mode);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	if (!p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			host->sd_error = false;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
				cmd->opcode, cmd->error);
			break;
		}
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

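	/*
	 * dma_active was set by the sh_mmcif_start_dma_*() calls above; if it
	 * is still false, DMA is unavailable or its setup failed, so set up
	 * PIO transfers instead.
	 */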
	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		if (!data->error)
			return true;
		return false;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);
	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error)
		data->bytes_xfered = 0;

	return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	cancel_delayed_work_sync(&host->timeout_work);

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed, successfully or not
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		if (sh_mmcif_end_cmd(host))
			/* Wait for data */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_MREAD:
		if (sh_mmcif_mread_block(host))
			/* Wait for more data */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_READ:
		if (sh_mmcif_read_block(host))
			/* Wait for data end */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		if (sh_mmcif_mwrite_block(host))
			/* Wait for more data to write */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_WRITE:
		if (sh_mmcif_write_block(host))
			/* Wait for data end */
			return IRQ_HANDLED;
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error)
			data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error)
				return IRQ_HANDLED;
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

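	/*
	 * Acknowledge exactly one event per hard-IRQ invocation: clear its
	 * status flag (write 0 to it via the ~flag pattern) and drop the
	 * corresponding mask bit, then let the IRQ thread do the real work.
	 */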
	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

	if (state & INT_ERR_STS) {
		/* error interrupts - process first */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else if (state & INT_RBSYE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
			~(INT_CMD12DRE | INT_CMD12RBE |
			  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else {
		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	pd = pdev->dev.platform_data;
	if (!pd) {
		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
		ret = -ENXIO;
		goto clean_up;
	}
	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto clean_up;
	}
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= 1000;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(host->hclk);
		goto clean_up1;
	}
	clk_enable(host->hclk);
	host->clk = clk_get_rate(host->hclk);
	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	mmc->f_max = host->clk;
	/* choose the divider so that f_min lands close to 400 kHz */
	if (mmc->f_max < 51200000)
		mmc->f_min = mmc->f_max / 128;
	else if (mmc->f_max < 102400000)
		mmc->f_min = mmc->f_max / 256;
	else
		mmc->f_min = mmc->f_max / 512;
	if (pd->ocr)
		mmc->ocr_avail = pd->ocr;
	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	sh_mmcif_sync_reset(host);
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto clean_up2;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto clean_up3;
	}
	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto clean_up4;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto clean_up5;

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

clean_up5:
	free_irq(irq[1], host);
clean_up4:
	free_irq(irq[0], host);
clean_up3:
	pm_runtime_suspend(&pdev->dev);
clean_up2:
	pm_runtime_disable(&pdev->dev);
	clk_disable(host->hclk);
clean_up1:
	mmc_free_host(mmc);
clean_up:
	if (reg)
		iounmap(reg);
	return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int irq[2];

	host->dying = true;
	pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int ret = mmc_suspend_host(host->mmc);

	if (!ret) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
		clk_disable(host->hclk);
	}

	return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	clk_enable(host->hclk);

	return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif	/* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	.suspend = sh_mmcif_suspend,
	.resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");