1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4 * DWC Ether MAC version 4.00 has been used for developing this code.
5 *
6 * This only implements the mac core functions for this chip.
7 *
8 * Copyright (C) 2015 STMicroelectronics Ltd
9 *
10 * Author: Alexandre Torgue <alexandre.torgue@st.com>
11 */
12
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include "stmmac.h"
19 #include "stmmac_fpe.h"
20 #include "stmmac_pcs.h"
21 #include "stmmac_vlan.h"
22 #include "dwmac4.h"
23 #include "dwmac5.h"
24
dwmac4_pcs_init(struct stmmac_priv * priv)25 static int dwmac4_pcs_init(struct stmmac_priv *priv)
26 {
27 if (!priv->dma_cap.pcs)
28 return 0;
29
30 return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE,
31 GMAC_INT_PCS_LINK | GMAC_INT_PCS_ANE);
32 }
33
/* Basic MAC bring-up: OR the GMAC_CORE_INIT defaults into GMAC_CONFIG,
 * program the LPI 1us tic counter from the CSR clock rate, and unmask
 * the default interrupt sources.
 */
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	unsigned long clk_rate;
	u32 value;

	value = readl(ioaddr + GMAC_CONFIG);
	writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONFIG);

	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);

	/* Enable GMAC interrupts */
	writel(GMAC_INT_DEFAULT_ENABLE, ioaddr + GMAC_INT_EN);

	/* The timestamp waitqueue is only needed when the timestamp
	 * interrupt is part of the default enable mask (compile-time check).
	 */
	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}
55
/* Read-modify-write the GMAC interrupt enable register: clear the bits
 * in @disable, set the bits in @enable. Serialised by irq_ctrl_lock so
 * concurrent mask updates cannot lose each other's changes.
 */
static void dwmac4_irq_modify(struct mac_device_info *hw, u32 disable,
			      u32 enable)
{
	void __iomem *int_mask = hw->pcsr + GMAC_INT_EN;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
	val = (readl(int_mask) & ~disable) | enable;
	writel(val, int_mask);
	spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
}
69
dwmac4_update_caps(struct stmmac_priv * priv)70 static void dwmac4_update_caps(struct stmmac_priv *priv)
71 {
72 if (priv->plat->tx_queues_to_use > 1)
73 priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
74 else
75 priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
76 }
77
/* Enable Rx @queue as an AV or DCB queue (or leave it disabled for any
 * other @mode) in GMAC_RXQ_CTRL0.
 */
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl = readl(ioaddr + GMAC_RXQ_CTRL0);

	/* GMAC_RX_QUEUE_CLEAR() appears to be a pre-inverted mask, hence
	 * the plain AND here.
	 */
	ctrl &= GMAC_RX_QUEUE_CLEAR(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		ctrl |= GMAC_RX_AV_QUEUE_ENABLE(queue);
		break;
	case MTL_QUEUE_DCB:
		ctrl |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
		break;
	default:
		break;
	}

	writel(ctrl, ioaddr + GMAC_RXQ_CTRL0);
}
92
/* Map the priorities in bitmask @prio to Rx @queue. PSRQ fields for
 * queues 0-3 live in GMAC_RXQ_CTRL2 and for queues 4-7 in CTRL3.
 */
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);

	/* The software must ensure that the same priority
	 * is not mapped to multiple Rx queues
	 */
	for (i = 0; i < 4; i++)
		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
			       GMAC_RXQCTRL_PSRQX_MASK(i));

	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* First assign new priorities to a queue, then
	 * clear them from others queues
	 */
	if (queue < 4) {
		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
			 GMAC_RXQCTRL_PSRQX_MASK(queue);

		/* Write the register holding the target queue first so the
		 * priority is never momentarily unmapped from every queue.
		 */
		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
	} else {
		queue -= 4;

		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
			 GMAC_RXQCTRL_PSRQX_MASK(queue);

		/* Mirror image of the branch above: CTRL3 first */
		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
	}
}
133
/* Program the PSTQ priority field for Tx @queue. Queues 0-3 live in
 * GMAC_TXQ_PRTY_MAP0, queues 4-7 in GMAC_TXQ_PRTY_MAP1.
 */
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg, val;

	if (queue < 4) {
		reg = GMAC_TXQ_PRTY_MAP0;
	} else {
		reg = GMAC_TXQ_PRTY_MAP1;
		queue -= 4;
	}

	val = readl(ioaddr + reg);
	val &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	val |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
	       GMAC_TXQCTRL_PSTQX_MASK(queue);
	writel(val, ioaddr + reg);
}
153
/* Route a special packet class (AV control, PTP, DCB control, untagged,
 * multicast/broadcast) to Rx @queue via GMAC_RXQ_CTRL1.
 *
 * NOTE(review): @packet is used as a 1-based index into
 * route_possibilities[] with no bounds check; callers must pass a valid
 * PACKET_* value (1..5) -- confirm against the stmmac core callers.
 */
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* mask/shift pairs indexed by (packet type - 1) */
	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet-1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops: an additional enable bit */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}
186
/* Select the MTL Rx arbitration algorithm (strict or weighted strict
 * priority); any other value leaves the RAA field cleared.
 */
static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 raa = 0;
	u32 value;

	if (rx_alg == MTL_RX_ALGORITHM_SP)
		raa = MTL_OPERATION_RAA_SP;
	else if (rx_alg == MTL_RX_ALGORITHM_WSP)
		raa = MTL_OPERATION_RAA_WSP;

	value = readl(ioaddr + MTL_OPERATION_MODE);
	value &= ~MTL_OPERATION_RAA;
	value |= raa;
	writel(value, ioaddr + MTL_OPERATION_MODE);
}
207
/* Select the MTL Tx scheduling algorithm (WRR, WFQ, DWRR or strict
 * priority); unknown values leave the SCHALG field cleared.
 */
static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 schalg = 0;
	u32 value;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		schalg = MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		schalg = MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		schalg = MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		schalg = MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	value = readl(ioaddr + MTL_OPERATION_MODE);
	value &= ~MTL_OPERATION_SCHALG_MASK;
	value |= schalg;
	writel(value, ioaddr + MTL_OPERATION_MODE);
}
234
/* Program the ISCQW weight field of Tx @queue's MTL weight register.
 * Also used by dwmac4_config_cbs() to set the CBS idle slope, which
 * shares this register.
 */
static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					   struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	const struct dwmac4_addrs *addrs = priv->plat->dwmac4_addrs;
	void __iomem *reg = hw->pcsr + mtl_txqx_weight_base_addr(addrs, queue);
	u32 value;

	value = readl(reg);
	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, reg);
}
248
/* Map Rx @queue onto DMA channel @chan. The QxMDMACH fields for queues
 * 0-3 live in MTL_RXQ_DMA_MAP0, for queues 4-7 in MTL_RXQ_DMA_MAP1.
 */
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg = queue < 4 ? MTL_RXQ_DMA_MAP0 : MTL_RXQ_DMA_MAP1;
	u32 value;

	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
	value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	writel(value, ioaddr + reg);
}
266
/* Configure Credit-Based Shaper (IEEE 802.1Qav) parameters for an AVB
 * Tx @queue: enable the AV algorithm, then program send slope, idle
 * slope, and the high/low credit limits.
 */
static void dwmac4_config_cbs(struct stmmac_priv *priv,
			      struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));

	/* configure send slope */
	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));

	/* configure low credit */
	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}
311
/* Snapshot the first GMAC_REG_NUM 32-bit MAC registers into @reg_space
 * (used by the ethtool register-dump path).
 */
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int n;

	for (n = 0; n < GMAC_REG_NUM; n++)
		reg_space[n] = readl(ioaddr + n * sizeof(u32));
}
320
dwmac4_rx_ipc_enable(struct mac_device_info * hw)321 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
322 {
323 void __iomem *ioaddr = hw->pcsr;
324 u32 value = readl(ioaddr + GMAC_CONFIG);
325
326 if (hw->rx_csum)
327 value |= GMAC_CONFIG_IPC;
328 else
329 value &= ~GMAC_CONFIG_IPC;
330
331 writel(value, ioaddr + GMAC_CONFIG);
332
333 value = readl(ioaddr + GMAC_CONFIG);
334
335 return !!(value & GMAC_CONFIG_IPC);
336 }
337
/* Program Wake-on-LAN: build the GMAC_PMT value from the WAKE_* flags
 * in @mode (magic packet and/or global unicast, each with power_down),
 * enabling the receiver first when any wake source is armed.
 * @mode == 0 writes 0, i.e. disables WOL.
 */
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}
361
/* Write perfect-filter MAC address entry @reg_n (high/low register pair). */
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
370
/* Read perfect-filter MAC address entry @reg_n into @addr. */
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
379
/* Configure the MAC's LPI (EEE) operating mode.
 *
 * STMMAC_LPI_DISABLE clears all LPI control bits; any other mode sets
 * LPIEN+LPITXA. In STMMAC_LPI_TIMER mode the hardware entry timer @et
 * is also programmed (0 means immediate LPI entry, i.e. LPIATE stays
 * clear). Returns -ERANGE if @et exceeds STMMAC_ET_MAX, 0 otherwise.
 */
static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
			       enum stmmac_lpi_mode mode,
			       bool en_tx_lpi_clockgating, u32 et)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, mask;

	if (mode == STMMAC_LPI_DISABLE) {
		value = 0;
	} else {
		value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;

		if (mode == STMMAC_LPI_TIMER) {
			/* Return ERANGE if the timer is larger than the
			 * register field.
			 */
			if (et > STMMAC_ET_MAX)
				return -ERANGE;

			/* Set the hardware LPI entry timer */
			writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);

			/* Interpret a zero LPI entry timer to mean
			 * immediate entry into LPI mode.
			 */
			if (et)
				value |= LPI_CTRL_STATUS_LPIATE;
		}

		if (en_tx_lpi_clockgating)
			value |= LPI_CTRL_STATUS_LPITCSE;
	}

	/* Only these four control bits are owned here; all other bits of
	 * the LPI control/status register are preserved by the merge below.
	 */
	mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
	       LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;

	value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);

	return 0;
}
421
dwmac4_set_eee_pls(struct mac_device_info * hw,int link)422 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
423 {
424 void __iomem *ioaddr = hw->pcsr;
425 u32 value;
426
427 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
428
429 if (link)
430 value |= LPI_CTRL_STATUS_PLS;
431 else
432 value &= ~LPI_CTRL_STATUS_PLS;
433
434 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
435 }
436
dwmac4_set_eee_timer(struct mac_device_info * hw,int ls,int tw)437 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
438 {
439 void __iomem *ioaddr = hw->pcsr;
440 int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
441
442 /* Program the timers in the LPI timer control register:
443 * LS: minimum time (ms) for which the link
444 * status from PHY should be ok before transmitting
445 * the LPI pattern.
446 * TW: minimum time (us) for which the core waits
447 * after it has stopped transmitting the LPI pattern.
448 */
449 writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
450 }
451
/* Program the Rx packet filter from the net_device state: promiscuous
 * / all-multicast modes, the multicast hash table, the perfect unicast
 * address filter, and the VLAN filter enable bit.
 */
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	/* each hash register holds 32 bins */
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	/* Start from the current filter with all mode bits cleared */
	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			/* note: value is rebuilt from scratch here */
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	/* Hash table is written even when empty, clearing stale bins */
	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;	/* entry 0 is the device's own address */

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Clear any leftover perfect-filter entries */
		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
		value &= ~GMAC_PACKET_FILTER_VTFE;
	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}
547
/* Program pause-frame flow control: the Rx side is a single global
 * enable, the Tx side is enabled per Tx queue with @pause_time set
 * only in full duplex.
 */
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	/* flow stays 0 when FLOW_RX is not requested, disabling Rx FC */
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |= FIELD_PREP(GMAC_TX_FLOW_CTRL_PT_MASK,
						   pause_time);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		/* explicitly disable Tx flow control on every queue */
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}
585
/* Thin wrapper: control PCS auto-negotiation via the shared PCS helper. */
static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral)
{
	dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral);
}
590
/* Check and acknowledge MTL per-queue interrupts for channel @chan.
 * Returns CORE_IRQ_MTL_RX_OVERFLOW when an Rx FIFO overflow was
 * flagged (the interrupt is cleared by writing the status bit back),
 * 0 otherwise.
 */
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
				 struct mac_device_info *hw, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
							      chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}
617
dwmac4_irq_status(struct stmmac_priv * priv,struct stmmac_extra_stats * x)618 static int dwmac4_irq_status(struct stmmac_priv *priv,
619 struct stmmac_extra_stats *x)
620 {
621 void __iomem *ioaddr = priv->hw->pcsr;
622 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
623 u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
624 int ret = 0;
625
626 /* Discard disabled bits */
627 intr_status &= intr_enable;
628
629 /* Not used events (e.g. MMC interrupts) are not handled. */
630 if ((intr_status & mmc_tx_irq))
631 x->mmc_tx_irq_n++;
632 if (unlikely(intr_status & mmc_rx_irq))
633 x->mmc_rx_irq_n++;
634 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
635 x->mmc_rx_csum_offload_irq_n++;
636 /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
637 if (unlikely(intr_status & pmt_irq)) {
638 readl(ioaddr + GMAC_PMT);
639 x->irq_receive_pmt_irq_n++;
640 }
641
642 /* MAC tx/rx EEE LPI entry/exit interrupts */
643 if (intr_status & lpi_irq) {
644 /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
645 u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
646
647 if (status & LPI_CTRL_STATUS_TLPIEN) {
648 ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
649 x->irq_tx_path_in_lpi_mode_n++;
650 }
651 if (status & LPI_CTRL_STATUS_TLPIEX) {
652 ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
653 x->irq_tx_path_exit_lpi_mode_n++;
654 }
655 if (status & LPI_CTRL_STATUS_RLPIEN)
656 x->irq_rx_path_in_lpi_mode_n++;
657 if (status & LPI_CTRL_STATUS_RLPIEX)
658 x->irq_rx_path_exit_lpi_mode_n++;
659 }
660
661 if (intr_status & (PCS_ANE_IRQ | PCS_LINK_IRQ))
662 stmmac_integrated_pcs_irq(priv, intr_status, x);
663
664 return ret;
665 }
666
/* Sample MTL per-queue and GMAC debug registers into the extra-stats
 * counters (ethtool debug statistics). Counters are bumped once per
 * call, so they represent samples over time, not event counts.
 */
static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
			 struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = FIELD_GET(MTL_DEBUG_TRCSTS_MASK, value);

			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = FIELD_GET(MTL_DEBUG_RXFSTS_MASK, value);

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = FIELD_GET(MTL_DEBUG_RRCSTS_MASK, value);

			/* NOTE(review): RSTAT bumps the read_data counter
			 * and RDATA bumps the status counter -- the mapping
			 * looks swapped; confirm against the databook before
			 * changing, as a swap would alter reported stats.
			 */
			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = FIELD_GET(GMAC_DEBUG_TFCSTS_MASK, value);

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	/* this one stores the field value rather than counting */
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = FIELD_GET(GMAC_DEBUG_RFCFCSTS_MASK,
						      value);
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}
754
/* Toggle the MAC-level loopback (LM) bit in GMAC_CONFIG. */
static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 cfg = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		cfg |= GMAC_CONFIG_LM;
	else
		cfg &= ~GMAC_CONFIG_LM;

	writel(cfg, ioaddr + GMAC_CONFIG);
}
766
/* Program the Source Address Replace/insert Control (SARC) field of
 * GMAC_CONFIG with @val.
 */
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	writel(u32_replace_bits(readl(ioaddr + GMAC_CONFIG), val,
				GMAC_CONFIG_SARC),
	       ioaddr + GMAC_CONFIG);
}
775
/* Program the ARP offload target address and toggle the ARPEN enable
 * bit. The address register is written even when disabling.
 */
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 cfg;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	cfg = readl(ioaddr + GMAC_CONFIG) & ~GMAC_CONFIG_ARPEN;
	if (en)
		cfg |= GMAC_CONFIG_ARPEN;
	writel(cfg, ioaddr + GMAC_CONFIG);
}
791
/* Configure L3 (IP address) filter @filter_no: @sa selects source vs
 * destination matching, @inv inverts the match, @ipv6 selects the IP
 * version. Writing 0 to the control register at the end disables the
 * filter when @en is false. Always returns 0.
 */
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Globally enable IP (L3/L4) filtering */
	value = readl(ioaddr + GMAC_PACKET_FILTER);
	writel(value | GMAC_PACKET_FILTER_IPFE, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (ipv6) {
		/* For IPv6 not both SA/DA filters can be active */
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0 |
			   GMAC_L3DAM0 | GMAC_L3DAIM0);
	} else {
		value &= ~GMAC_L3PEN0;
	}

	/* Source vs destination match (optionally inverted) */
	if (sa) {
		value |= GMAC_L3SAM0;
		if (inv)
			value |= GMAC_L3SAIM0;
	} else {
		value |= GMAC_L3DAM0;
		if (inv)
			value |= GMAC_L3DAIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa)
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	else
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}
845
/* Configure L4 (TCP/UDP port) filter @filter_no: @udp selects UDP vs
 * TCP, @sa selects source vs destination port matching, @inv inverts
 * the match. Writing 0 to the control register at the end disables the
 * filter when @en is false. Always returns 0.
 */
static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Globally enable IP (L3/L4) filtering */
	value = readl(ioaddr + GMAC_PACKET_FILTER);
	writel(value | GMAC_PACKET_FILTER_IPFE, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp)
		value |= GMAC_L4PEN0;
	else
		value &= ~GMAC_L4PEN0;

	/* Start from a clean port-match configuration */
	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0 | GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa)
		value = FIELD_PREP(GMAC_L4SP0, match);
	else
		value = FIELD_PREP(GMAC_L4DP0, match);

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}
891
/* Callback table for DWMAC 4.00 cores: uses the generic stmmac_set_mac
 * and has no dwmac5 safety/FPE/PPS extensions.
 */
const struct stmmac_ops dwmac4_ops = {
	.pcs_init = dwmac4_pcs_init,
	.core_init = dwmac4_core_init,
	.irq_modify = dwmac4_irq_modify,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
};
927
/* Callback table for DWMAC 4.10 cores: switches to the dwmac4-specific
 * set_mac and adds flexible PPS and FPE preemption-class mapping on
 * top of the 4.00 set.
 */
const struct stmmac_ops dwmac410_ops = {
	.pcs_init = dwmac4_pcs_init,
	.core_init = dwmac4_core_init,
	.irq_modify = dwmac4_irq_modify,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};
965
/* Callback table for DWMAC 5.10 cores: the 4.10 set plus the dwmac5
 * safety-feature and flexible Rx parser (rxp) callbacks.
 */
const struct stmmac_ops dwmac510_ops = {
	.pcs_init = dwmac4_pcs_init,
	.core_init = dwmac4_core_init,
	.irq_modify = dwmac4_irq_modify,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};
1007
/* One-time setup of mac_device_info for DWMAC4/5 cores: filter sizes,
 * link capabilities, speed-select config bits, and the MDIO register
 * layout. Always returns 0.
 */
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	/* 0 bins means no hash filtering; keep log2 at 0 in that case */
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	mac->link.duplex = GMAC_CONFIG_DM;
	/* GMAC_CONFIG bit patterns selecting each speed */
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	/* MDIO (GMII address/data) register layout for this core */
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);

	return 0;
}
1043