// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the DMA.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/bitfield.h>
#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"
#include "stmmac.h"

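/* Program the AXI bus mode register from the platform-provided
 * stmmac_axi configuration: LPI behaviour, read/write outstanding
 * request limits and the allowed burst lengths (BLEN).
 */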
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

	pr_info("dwmac4: Master AXI performs %s burst length\n",
		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value = u32_replace_bits(value, axi->axi_wr_osr_lmt,
				 DMA_AXI_WR_OSR_LMT);
	value = u32_replace_bits(value, axi->axi_rd_osr_lmt,
				 DMA_AXI_RD_OSR_LMT);

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set). Note that the UNDEF bit is read-only, and is the inverse of
	 * Bus Mode bit 16.
	 */
	value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

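/* Program the per-channel RX DMA: the burst length (RXPBL) and the RX
 * descriptor ring base address. When extended addressing (EAME) is
 * enabled, the upper 32 bits of the ring address are written as well.
 */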
static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_rx_phy, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
	value = value | FIELD_PREP(DMA_CHAN_RX_CTRL_RXPBL_MASK, rxpbl);
	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_rx_phy),
		       ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, chan));

	writel(lower_32_bits(dma_rx_phy),
	       ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, chan));
}

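/* Program the per-channel TX DMA: the burst length (TXPBL), Operate on
 * Second Packet (OSP) and the TX descriptor ring base address (with the
 * high word when EAME is enabled).
 */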
static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg,
				    dma_addr_t dma_tx_phy, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
	value = value | FIELD_PREP(DMA_CHAN_TX_CTRL_TXPBL_MASK, txpbl);

	/* Enable OSP to get best performance */
	value |= DMA_CONTROL_OSP;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
		writel(upper_32_bits(dma_tx_phy),
		       ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, chan));

	writel(lower_32_bits(dma_tx_phy),
	       ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, chan));
}

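/* Common channel control setup: optionally multiply the programmed
 * burst lengths by eight (PBLx8) and install the default interrupt
 * mask for this core revision.
 */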
static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
				    void __iomem *ioaddr,
				    struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
	if (dma_cfg->pblx8)
		value = value | DMA_CHAN_CTRL_PBLX8;
	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK,
	       ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}

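/* Same as dwmac4_dma_init_channel() but using the interrupt mask layout
 * of core versions 4.10 and later (DMA_CHAN_INTR_DEFAULT_MASK_4_10).
 */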
static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
				      void __iomem *ioaddr,
				      struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;

	/* common channel control register config */
	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
	if (dma_cfg->pblx8)
		value = value | DMA_CHAN_CTRL_PBLX8;

	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
	       ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}

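/* One-time DMA setup common to all channels: program the system bus
 * mode (fixed/mixed burst, address-aligned beats, extended addressing)
 * and the bus mode register (interrupt mode 1 for multi-vector MSI,
 * the DCHE bit when the platform requests it).
 */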
static void dwmac4_dma_init(void __iomem *ioaddr,
			    struct stmmac_dma_cfg *dma_cfg)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_SYS_BUS_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_SYS_BUS_MB;

	if (dma_cfg->aal)
		value |= DMA_SYS_BUS_AAL;

	if (dma_cfg->eame)
		value |= DMA_SYS_BUS_EAME;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);

	value = readl(ioaddr + DMA_BUS_MODE);

	if (dma_cfg->multi_msi_en)
		value = u32_replace_bits(value, DMA_BUS_MODE_INTM_MODE1,
					 DMA_BUS_MODE_INTM_MASK);

	if (dma_cfg->dche)
		value |= DMA_BUS_MODE_DCHE;

	writel(value, ioaddr + DMA_BUS_MODE);
}

static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
				  void __iomem *ioaddr, u32 channel,
				  u32 *reg_space)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	const struct dwmac4_addrs *default_addrs = NULL;

	/* Purposely save the registers in the "normal" layout, regardless of
	 * platform modifications, to keep reg_space size constant
	 */
	reg_space[DMA_CHAN_CONTROL(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_CONTROL(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_END_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_TX_RING_LEN(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_RING_LEN(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_INTR_ENA(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_RX_WATCHDOG(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_TX_DESC(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
	reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
		readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, channel));
}

static void dwmac4_dump_dma_regs(struct stmmac_priv *priv, void __iomem *ioaddr,
				 u32 *reg_space)
{
	int i;

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		_dwmac4_dump_dma_regs(priv, ioaddr, i, reg_space);
}

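/* Program the per-queue RX interrupt watchdog timer (RIWT); a non-zero
 * value delays the RX interrupt so that several completions can be
 * coalesced into a single interrupt.
 */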
static void dwmac4_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
			       u32 riwt, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;

	writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, queue));
}

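/* Configure the MTL RX operating mode for a channel: store-and-forward
 * or a cut-through threshold, the RX queue size and, when the FIFO is
 * large enough, hardware flow control thresholds. The RQS field holds
 * the queue size in 256-byte blocks minus one, so e.g. a 4096-byte FIFO
 * yields RQS = 15.
 */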
static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
				       void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	unsigned int rqs = fifosz / 256 - 1;
	u32 mtl_rx_op;

	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));

	mtl_rx_op |= MTL_OP_MODE_DIS_TCP_EF;

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		mtl_rx_op |= MTL_OP_MODE_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		mtl_rx_op &= ~MTL_OP_MODE_RSF;
		mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
		if (mode <= 32)
			mtl_rx_op |= MTL_OP_MODE_RTC_32;
		else if (mode <= 64)
			mtl_rx_op |= MTL_OP_MODE_RTC_64;
		else if (mode <= 96)
			mtl_rx_op |= MTL_OP_MODE_RTC_96;
		else
			mtl_rx_op |= MTL_OP_MODE_RTC_128;
	}

	mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
	mtl_rx_op |= FIELD_PREP(MTL_OP_MODE_RQS_MASK, rqs);

	/* Enable flow control only if each channel gets 4 KiB or more FIFO
	 * and only if the channel is not an AVB channel.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		unsigned int rfd, rfa;

		mtl_rx_op |= MTL_OP_MODE_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* The FIFO is too small to honour the thresholds
			 * above, so use smaller ones; overflow may still
			 * occur in spite of flow control.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		default:
			rfd = 0x07; /* Full-4.5K */
			rfa = 0x04; /* Full-3K */
			break;
		}

		mtl_rx_op = u32_replace_bits(mtl_rx_op, rfd,
					     MTL_OP_MODE_RFD_MASK);
		mtl_rx_op = u32_replace_bits(mtl_rx_op, rfa,
					     MTL_OP_MODE_RFA_MASK);
	}

	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
}

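/* Configure the MTL TX operating mode for a channel: store-and-forward
 * or a cut-through threshold, the queue enable mode (generic/DCB vs
 * AVB) and the TX queue size. Like RQS, the TQS field encodes the queue
 * size in 256-byte blocks minus one.
 */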
static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
				       void __iomem *ioaddr, int mode,
				       u32 channel, int fifosz, u8 qmode)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
							   channel));
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		mtl_tx_op |= MTL_OP_MODE_TSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		mtl_tx_op &= ~MTL_OP_MODE_TSF;
		mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			mtl_tx_op |= MTL_OP_MODE_TTC_32;
		else if (mode <= 64)
			mtl_tx_op |= MTL_OP_MODE_TTC_64;
		else if (mode <= 96)
			mtl_tx_op |= MTL_OP_MODE_TTC_96;
		else if (mode <= 128)
			mtl_tx_op |= MTL_OP_MODE_TTC_128;
		else if (mode <= 192)
			mtl_tx_op |= MTL_OP_MODE_TTC_192;
		else if (mode <= 256)
			mtl_tx_op |= MTL_OP_MODE_TTC_256;
		else if (mode <= 384)
			mtl_tx_op |= MTL_OP_MODE_TTC_384;
		else
			mtl_tx_op |= MTL_OP_MODE_TTC_512;
	}
	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
	 * with reset values: TXQEN off, TQS 256 bytes.
	 *
	 * TXQEN must be written for multi-channel operation and TQS must
	 * reflect the available fifo size per queue (total fifo size / number
	 * of enabled queues).
	 */
	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;

	mtl_tx_op = u32_replace_bits(mtl_tx_op, tqs, MTL_OP_MODE_TQS_MASK);

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}

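/* Decode the GMAC_HW_FEATURE0..3 capability registers into dma_cap.
 * Note that addr64 is re-encoded in place, from the raw field value
 * (0/1/2) to the supported bus width in bits (32/40/48).
 */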
static int dwmac4_get_hw_feature(void __iomem *ioaddr,
				 struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

	/* MAC HW feature0 */
	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
	dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
	/* MMC */
	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
	dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
	dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;

	dma_cap->actphyif = FIELD_GET(DMA_HW_FEAT_ACTPHYIF, hw_cap);

	/* MAC HW feature1 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
	dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
	dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
	dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;

	dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
	 * shifting and store the sizes in bytes.
	 */
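	/* Example: an encoded value of 7 gives 128 << 7 = 16384 bytes. */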
	dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
	/* MAC HW feature2 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
	/* TX and RX number of channels */
	dma_cap->number_rx_channel =
		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_channel =
		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
	/* TX and RX number of queues */
	dma_cap->number_rx_queues =
		((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
	/* PPS output */
	dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;

	/* IEEE 1588-2002 */
	dma_cap->time_stamp = 0;
	/* Number of Auxiliary Snapshot Inputs */
	dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28;

	/* MAC HW feature3 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);

	/* 5.10 Features */
	dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
	dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
	dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
	dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
	dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
	dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
	dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
	dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
	dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
	dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;

	return 0;
}

/* Enable/disable the TSO feature for a TX DMA channel */
static void dwmac4_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
			      bool en, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;

	if (en) {
		/* enable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
		writel(value | DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
	} else {
		/* disable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
		writel(value & ~DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
	}
}

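/* Switch the TX queue operating mode between generic/DCB and AVB
 * without touching the rest of the MTL TX configuration.
 */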
static void dwmac4_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
			 u32 channel, u8 qmode)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
							   channel));

	mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
	if (qmode != MTL_QUEUE_AVB)
		mtl_tx_op |= MTL_OP_MODE_TXQEN;
	else
		mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}

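/* Program the RX buffer size (RBSZ) in bytes for a channel */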
static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
			      int bfsize, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));

	value = u32_replace_bits(value, bfsize, DMA_RBSZ_MASK);

	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}

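/* Configure Split Header (SPH) support: the split mode and maximum
 * header size are programmed unconditionally; only the per-channel SPH
 * enable bit follows @en.
 */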
static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
			      bool en, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value = readl(ioaddr + GMAC_EXT_CONFIG);

	value &= ~GMAC_CONFIG_HDSMS;
	value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
	writel(value, ioaddr + GMAC_EXT_CONFIG);

	value = readl(ioaddr + GMAC_EXT_CFG1);
	value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
	value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
	writel(value, ioaddr + GMAC_EXT_CFG1);

	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
	if (en)
		value |= DMA_CONTROL_SPH;
	else
		value &= ~DMA_CONTROL_SPH;
	writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
}

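/* Enable/disable TBS (Time Based Scheduling) by toggling the enhanced
 * descriptor enable (EDSE) bit. The bit is read back so that cores
 * without TBS support can be detected (-EIO), then the default fetch
 * time offset is programmed.
 */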
static int dwmac4_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
			     bool en, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));

	if (en)
		value |= DMA_CONTROL_EDSE;
	else
		value &= ~DMA_CONTROL_EDSE;

	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs,
						   chan)) & DMA_CONTROL_EDSE;
	if (en && !value)
		return -EIO;

	writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
	return 0;
}

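/* DMA callbacks for DW GMAC 4.00 style cores. dwmac410_dma_ops below,
 * used from core version 4.10 onwards, differs only in the channel init
 * and interrupt-enable callbacks and additionally supports TBS.
 */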
const struct stmmac_dma_ops dwmac4_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac4_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac4_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.init_chan = dwmac410_dma_init_channel,
	.init_rx_chan = dwmac4_dma_init_rx_chan,
	.init_tx_chan = dwmac4_dma_init_tx_chan,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
	.enable_dma_irq = dwmac410_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
	.qmode = dwmac4_qmode,
	.set_bfsize = dwmac4_set_bfsize,
	.enable_sph = dwmac4_enable_sph,
	.enable_tbs = dwmac4_enable_tbs,
};