/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>

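/* Slave channel parameters handed to the intel_mid_dma driver via chan->private */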
struct mid_dma {
	struct intel_mid_dma_slave	dmas_tx;
	struct intel_mid_dma_slave	dmas_rx;
};

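/*
 * dma_request_channel() filter: only accept channels that belong to the
 * MID DMA controller PCI device looked up in mid_spi_dma_init().
 */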
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_spi *dws = param;

	return dws->dmac && (&dws->dmac->dev == chan->device->dev);
}

static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct mid_dma *dw_dma = dws->dma_priv;
	struct intel_mid_dma_slave *rxs, *txs;
	dma_cap_mask_t mask;

	/*
	 * Get pci device for DMA controller, currently it could only
	 * be the DMA controller of either Moorestown or Medfield
	 */
	dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
	if (!dws->dmac)
		dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel: hardware handshaking, peripheral-to-memory */
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->rxchan)
		goto err_exit;
	rxs = &dw_dma->dmas_rx;
	rxs->hs_mode = LNW_DMA_HW_HS;
	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
	dws->rxchan->private = rxs;

	/* 2. Init tx channel: hardware handshaking, memory-to-peripheral */
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->txchan)
		goto free_rxchan;
	txs = &dw_dma->dmas_tx;
	txs->hs_mode = LNW_DMA_HW_HS;
	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
	dws->txchan->private = txs;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

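/* Release the DMA channels acquired in mid_spi_dma_init() */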
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	dma_release_channel(dws->txchan);
	dma_release_channel(dws->rxchan);
}

/*
 * dws->dma_chan_done is cleared before the dma transfer starts, and the
 * rx/tx channel callbacks each increment it by 1.
 * Reaching 2 means the whole spi transaction is done.
 */
static void dw_spi_dma_done(void *arg)
{
	struct dw_spi *dws = arg;

	if (++dws->dma_chan_done != 2)
		return;
	dw_spi_xfer_done(dws);
}

static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
	struct dma_chan *txchan, *rxchan;
	struct dma_slave_config txconf, rxconf;
	u16 dma_ctrl = 0;

	/* 1. setup DMA related registers */
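	/*
	 * DMARDLR/DMATDLR are the RX/TX FIFO levels at which the controller
	 * raises DMA requests; DMACR bit 1 enables TX DMA, bit 0 RX DMA.
	 * The SPI core is disabled while these registers are changed.
	 */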
	if (cs_change) {
		spi_enable_chip(dws, 0);
		dw_writew(dws, DW_SPI_DMARDLR, 0xf);
		dw_writew(dws, DW_SPI_DMATDLR, 0x10);
		if (dws->tx_dma)
			dma_ctrl |= 0x2;
		if (dws->rx_dma)
			dma_ctrl |= 0x1;
		dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
		spi_enable_chip(dws, 1);
	}

	dws->dma_chan_done = 0;
	txchan = dws->txchan;
	rxchan = dws->rxchan;

	/* 2. Prepare the TX dma transfer */
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
				       (unsigned long) &txconf);

	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
	dws->tx_sgl.dma_address = dws->tx_dma;
	dws->tx_sgl.length = dws->len;

	txdesc = txchan->device->device_prep_slave_sg(txchan,
				&dws->tx_sgl,
				1,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
	txdesc->callback = dw_spi_dma_done;
	txdesc->callback_param = dws;

	/* 3. Prepare the RX dma transfer */
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
				       (unsigned long) &rxconf);

	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
	dws->rx_sgl.dma_address = dws->rx_dma;
	dws->rx_sgl.length = dws->len;

	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
				&dws->rx_sgl,
				1,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
	rxdesc->callback = dw_spi_dma_done;
	rxdesc->callback_param = dws;
	/*
	 * RX must be submitted before TX: receive data starts arriving as
	 * soon as the transmit side begins clocking the bus.
	 */
	rxdesc->tx_submit(rxdesc);
	txdesc->tx_submit(txdesc);
	return 0;
}

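/* DMA callbacks plugged into the DW SPI core through dws->dma_ops */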
static struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_transfer	= mid_spi_dma_transfer,
};
#endif

/* Some specific info for the SPI0 controller on Moorestown */

/* HW info for the MRST Clock Control Unit, one 32-bit register */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI0_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

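/**
 * dw_spi_mid_init - Intel MID specific setup for the DW SPI core
 * @dws: DW SPI controller context
 *
 * Derives the controller's max_freq from the SPI0 clock divider in the
 * MRST clock control unit, fills in MID-specific FIFO and chip-select
 * parameters and, when CONFIG_SPI_DW_MID_DMA is set, hooks up the DMA
 * operations.
 */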
int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq: 100 MHz base / (CDIV + 1) */
	clk_cdiv  = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
	iounmap(clk_reg);

	dws->num_cs = 16;
	dws->fifo_len = 40;	/* FIFO is 40 words deep */

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
	if (!dws->dma_priv)
		return -ENOMEM;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}