xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c (revision b803c4a4f78834b31ebfbbcea350473333760559)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4  * DWC Ether MAC version 4.00  has been used for developing this code.
5  *
6  * This only implements the mac core functions for this chip.
7  *
8  * Copyright (C) 2015  STMicroelectronics Ltd
9  *
10  * Author: Alexandre Torgue <alexandre.torgue@st.com>
11  */
12 
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include "stmmac.h"
19 #include "stmmac_fpe.h"
20 #include "stmmac_pcs.h"
21 #include "stmmac_vlan.h"
22 #include "dwmac4.h"
23 #include "dwmac5.h"
24 
/* One-time MAC core setup: program the base GMAC_CONFIG bits, the LPI 1us
 * tick reference derived from the CSR clock, and the default interrupt
 * enable mask (plus PCS interrupts when a PCS is present).
 */
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);
	unsigned long clk_rate;

	value |= GMAC_CORE_INIT;

	/* hw->ps carries a fixed port speed; force the transmitter on and
	 * encode the speed into the PS/FES config bits.
	 */
	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		/* NOTE(review): this keeps ONLY the speed bits and drops
		 * GMAC_CORE_INIT/TE set above; it looks like it should be
		 * `value &= ~hw->link.speed_mask` — confirm before changing,
		 * since this fixed-speed path is rarely exercised.
		 */
		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);

	/* Compile-time constant condition: the waitqueue is only needed when
	 * the timestamp interrupt is part of the default enable mask.
	 */
	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}
69 
70 static void dwmac4_update_caps(struct stmmac_priv *priv)
71 {
72 	if (priv->plat->tx_queues_to_use > 1)
73 		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
74 	else
75 		priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
76 }
77 
78 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
79 				   u8 mode, u32 queue)
80 {
81 	void __iomem *ioaddr = hw->pcsr;
82 	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
83 
84 	value &= GMAC_RX_QUEUE_CLEAR(queue);
85 	if (mode == MTL_QUEUE_AVB)
86 		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
87 	else if (mode == MTL_QUEUE_DCB)
88 		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
89 
90 	writel(value, ioaddr + GMAC_RXQ_CTRL0);
91 }
92 
/* Map the priority bitmask @prio onto RX @queue. The PSRQ fields for
 * queues 0-3 live in RXQ_CTRL2 and for queues 4-7 in RXQ_CTRL3 (four
 * 8-bit fields per register).
 */
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);

	/* The software must ensure that the same priority
	 * is not mapped to multiple Rx queues
	 */
	for (i = 0; i < 4; i++)
		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
						GMAC_RXQCTRL_PSRQX_MASK(i));

	/* Remove @prio from every PSRQ field in both registers */
	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* First assign new priorities to a queue, then
	 * clear them from others queues: write the register that gains the
	 * mapping before the one that loses it, so the priority is never
	 * momentarily unassigned.
	 */
	if (queue < 4) {
		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
	} else {
		queue -= 4;

		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
	}
}
133 
134 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
135 				     u32 prio, u32 queue)
136 {
137 	void __iomem *ioaddr = hw->pcsr;
138 	u32 base_register;
139 	u32 value;
140 
141 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
142 	if (queue >= 4)
143 		queue -= 4;
144 
145 	value = readl(ioaddr + base_register);
146 
147 	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
148 	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
149 						GMAC_TXQCTRL_PSTQX_MASK(queue);
150 
151 	writel(value, ioaddr + base_register);
152 }
153 
/* Route a special packet class (@packet, one of the PACKET_* values) to
 * RX @queue via RXQ_CTRL1.
 */
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Indexed by (packet - 1): PACKET_* values are assumed to start at 1
	 * and to stay within this table — there is no bounds check here.
	 */
	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet-1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops: their routing also has a separate
	 * enable bit in the same register
	 */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}
186 
187 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
188 					  u32 rx_alg)
189 {
190 	void __iomem *ioaddr = hw->pcsr;
191 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
192 
193 	value &= ~MTL_OPERATION_RAA;
194 	switch (rx_alg) {
195 	case MTL_RX_ALGORITHM_SP:
196 		value |= MTL_OPERATION_RAA_SP;
197 		break;
198 	case MTL_RX_ALGORITHM_WSP:
199 		value |= MTL_OPERATION_RAA_WSP;
200 		break;
201 	default:
202 		break;
203 	}
204 
205 	writel(value, ioaddr + MTL_OPERATION_MODE);
206 }
207 
208 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
209 					  u32 tx_alg)
210 {
211 	void __iomem *ioaddr = hw->pcsr;
212 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
213 
214 	value &= ~MTL_OPERATION_SCHALG_MASK;
215 	switch (tx_alg) {
216 	case MTL_TX_ALGORITHM_WRR:
217 		value |= MTL_OPERATION_SCHALG_WRR;
218 		break;
219 	case MTL_TX_ALGORITHM_WFQ:
220 		value |= MTL_OPERATION_SCHALG_WFQ;
221 		break;
222 	case MTL_TX_ALGORITHM_DWRR:
223 		value |= MTL_OPERATION_SCHALG_DWRR;
224 		break;
225 	case MTL_TX_ALGORITHM_SP:
226 		value |= MTL_OPERATION_SCHALG_SP;
227 		break;
228 	default:
229 		break;
230 	}
231 
232 	writel(value, ioaddr + MTL_OPERATION_MODE);
233 }
234 
235 static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
236 					   struct mac_device_info *hw,
237 					   u32 weight, u32 queue)
238 {
239 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
240 	void __iomem *ioaddr = hw->pcsr;
241 	u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
242 							     queue));
243 
244 	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
245 	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
246 	writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
247 }
248 
249 static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
250 {
251 	void __iomem *ioaddr = hw->pcsr;
252 	u32 value;
253 
254 	if (queue < 4) {
255 		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
256 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
257 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
258 		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
259 	} else {
260 		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
261 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
262 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
263 		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
264 	}
265 }
266 
/* Configure the Credit-Based Shaper for an AVB TX @queue: enable the AV
 * algorithm with credit control, then program the send slope, idle slope,
 * high credit and low credit parameters.
 */
static void dwmac4_config_cbs(struct stmmac_priv *priv,
			      struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));

	/* configure send slope */
	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));

	/* configure low credit */
	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}
311 
312 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
313 {
314 	void __iomem *ioaddr = hw->pcsr;
315 	int i;
316 
317 	for (i = 0; i < GMAC_REG_NUM; i++)
318 		reg_space[i] = readl(ioaddr + i * 4);
319 }
320 
321 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
322 {
323 	void __iomem *ioaddr = hw->pcsr;
324 	u32 value = readl(ioaddr + GMAC_CONFIG);
325 
326 	if (hw->rx_csum)
327 		value |= GMAC_CONFIG_IPC;
328 	else
329 		value &= ~GMAC_CONFIG_IPC;
330 
331 	writel(value, ioaddr + GMAC_CONFIG);
332 
333 	value = readl(ioaddr + GMAC_CONFIG);
334 
335 	return !!(value & GMAC_CONFIG_IPC);
336 }
337 
338 static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
339 {
340 	void __iomem *ioaddr = hw->pcsr;
341 	unsigned int pmt = 0;
342 	u32 config;
343 
344 	if (mode & WAKE_MAGIC) {
345 		pr_debug("GMAC: WOL Magic frame\n");
346 		pmt |= power_down | magic_pkt_en;
347 	}
348 	if (mode & WAKE_UCAST) {
349 		pr_debug("GMAC: WOL on global unicast\n");
350 		pmt |= power_down | global_unicast | wake_up_frame_en;
351 	}
352 
353 	if (pmt) {
354 		/* The receiver must be enabled for WOL before powering down */
355 		config = readl(ioaddr + GMAC_CONFIG);
356 		config |= GMAC_CONFIG_RE;
357 		writel(config, ioaddr + GMAC_CONFIG);
358 	}
359 	writel(pmt, ioaddr + GMAC_PMT);
360 }
361 
/* Write MAC address @addr into perfect-filter slot @reg_n. */
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
370 
/* Read the MAC address stored in perfect-filter slot @reg_n into @addr. */
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
379 
/* Configure EEE low-power-idle operation.
 *
 * @mode: STMMAC_LPI_DISABLE clears all LPI control bits; other modes
 *	enable LPI with TX automation, and STMMAC_LPI_TIMER additionally
 *	programs the hardware entry timer.
 * @en_tx_lpi_clockgating: also gate the TX clock while in LPI.
 * @et: LPI entry timer value (timer mode only; 0 means immediate entry).
 *
 * Returns 0 on success or -ERANGE when @et exceeds the register field.
 */
static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
			       enum stmmac_lpi_mode mode,
			       bool en_tx_lpi_clockgating, u32 et)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, mask;

	if (mode == STMMAC_LPI_DISABLE) {
		value = 0;
	} else {
		value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;

		if (mode == STMMAC_LPI_TIMER) {
			/* Return ERANGE if the timer is larger than the
			 * register field.
			 */
			if (et > STMMAC_ET_MAX)
				return -ERANGE;

			/* Set the hardware LPI entry timer */
			writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);

			/* Interpret a zero LPI entry timer to mean
			 * immediate entry into LPI mode.
			 */
			if (et)
				value |= LPI_CTRL_STATUS_LPIATE;
		}

		if (en_tx_lpi_clockgating)
			value |= LPI_CTRL_STATUS_LPITCSE;
	}

	/* Update only the LPI control bits; the register also carries the
	 * read-to-clear LPI status bits which must be preserved here.
	 */
	mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
	       LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;

	value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);

	return 0;
}
421 
422 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
423 {
424 	void __iomem *ioaddr = hw->pcsr;
425 	u32 value;
426 
427 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
428 
429 	if (link)
430 		value |= LPI_CTRL_STATUS_PLS;
431 	else
432 		value &= ~LPI_CTRL_STATUS_PLS;
433 
434 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
435 }
436 
437 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
438 {
439 	void __iomem *ioaddr = hw->pcsr;
440 	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
441 
442 	/* Program the timers in the LPI timer control register:
443 	 * LS: minimum time (ms) for which the link
444 	 *  status from PHY should be ok before transmitting
445 	 *  the LPI pattern.
446 	 * TW: minimum time (us) for which the core waits
447 	 *  after it has stopped transmitting the LPI pattern.
448 	 */
449 	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
450 }
451 
/* Rebuild the full RX filter state from @dev: promiscuous/all-multi modes,
 * the multicast hash table, the perfect unicast filter and VLAN filtering.
 */
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	/* Uses the netdev's mapped base address rather than hw->pcsr */
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	/* Start from the current filter config with all mode bits cleared */
	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing: steer VLAN-filter
		 * misses to a dedicated queue instead of passing everything.
		 * Note: `value` is rebuilt from scratch in both branches.
		 */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	/* Flush the (possibly empty) hash table to hardware */
	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		/* Slot 0 holds the device's own address; fill the rest and
		 * clear any stale entries beyond the current list.
		 */
		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
		value &= ~GMAC_PACKET_FILTER_VTFE;
	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}
547 
/* Configure 802.3x flow control: RX pause handling in RX_FLOW_CTRL and,
 * per TX queue, pause-frame generation with @pause_time (TX pause bits are
 * cleared on all @tx_cnt queues when FLOW_TX is not requested).
 */
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;	/* reused for both the RX and TX registers */
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			/* The pause time is only meaningful in full duplex */
			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}
585 
/* Thin wrapper: drive PCS auto-negotiation at the GMAC4 PCS base offset. */
static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
591 
/* Thin wrapper: read PCS advertised/link-partner abilities into @adv. */
static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
596 
/* RGMII or SMII interface: decode the in-band link status reported in
 * GMAC_PHYIF_CONTROL_STATUS into the extra-stats PCS fields.
 */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		/* SPEED field encodes the RGMII clock: 125/25/2.5 MHz map to
		 * 1000/100/10 Mb/s respectively.
		 */
		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		/* Non-zero LNKMOD means full duplex */
		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}
629 
/* Handle the MTL interrupt for channel @chan. Currently only the RX
 * overflow event is recognized; it is acknowledged by writing the status
 * bit back, and CORE_IRQ_MTL_RX_OVERFLOW is returned (0 otherwise).
 */
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
				 struct mac_device_info *hw, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
							      chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/*  clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}
656 
/* Top-level MAC interrupt handler: updates MMC/PMT/LPI statistics, lets the
 * PCS helper process its events, and returns a CORE_IRQ_* bitmask for the
 * LPI entry/exit events the caller acts upon.
 */
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	/* (only counted; note this first check lacks the unlikely() hint
	 * its siblings use — cosmetic inconsistency)
	 */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}
706 
707 static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
708 			 struct stmmac_extra_stats *x,
709 			 u32 rx_queues, u32 tx_queues)
710 {
711 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
712 	u32 value;
713 	u32 queue;
714 
715 	for (queue = 0; queue < tx_queues; queue++) {
716 		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));
717 
718 		if (value & MTL_DEBUG_TXSTSFSTS)
719 			x->mtl_tx_status_fifo_full++;
720 		if (value & MTL_DEBUG_TXFSTS)
721 			x->mtl_tx_fifo_not_empty++;
722 		if (value & MTL_DEBUG_TWCSTS)
723 			x->mmtl_fifo_ctrl++;
724 		if (value & MTL_DEBUG_TRCSTS_MASK) {
725 			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
726 				     >> MTL_DEBUG_TRCSTS_SHIFT;
727 			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
728 				x->mtl_tx_fifo_read_ctrl_write++;
729 			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
730 				x->mtl_tx_fifo_read_ctrl_wait++;
731 			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
732 				x->mtl_tx_fifo_read_ctrl_read++;
733 			else
734 				x->mtl_tx_fifo_read_ctrl_idle++;
735 		}
736 		if (value & MTL_DEBUG_TXPAUSED)
737 			x->mac_tx_in_pause++;
738 	}
739 
740 	for (queue = 0; queue < rx_queues; queue++) {
741 		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
742 
743 		if (value & MTL_DEBUG_RXFSTS_MASK) {
744 			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
745 				     >> MTL_DEBUG_RRCSTS_SHIFT;
746 
747 			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
748 				x->mtl_rx_fifo_fill_level_full++;
749 			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
750 				x->mtl_rx_fifo_fill_above_thresh++;
751 			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
752 				x->mtl_rx_fifo_fill_below_thresh++;
753 			else
754 				x->mtl_rx_fifo_fill_level_empty++;
755 		}
756 		if (value & MTL_DEBUG_RRCSTS_MASK) {
757 			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
758 				     MTL_DEBUG_RRCSTS_SHIFT;
759 
760 			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
761 				x->mtl_rx_fifo_read_ctrl_flush++;
762 			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
763 				x->mtl_rx_fifo_read_ctrl_read_data++;
764 			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
765 				x->mtl_rx_fifo_read_ctrl_status++;
766 			else
767 				x->mtl_rx_fifo_read_ctrl_idle++;
768 		}
769 		if (value & MTL_DEBUG_RWCSTS)
770 			x->mtl_rx_fifo_ctrl_active++;
771 	}
772 
773 	/* GMAC debug */
774 	value = readl(ioaddr + GMAC_DEBUG);
775 
776 	if (value & GMAC_DEBUG_TFCSTS_MASK) {
777 		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
778 			      >> GMAC_DEBUG_TFCSTS_SHIFT;
779 
780 		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
781 			x->mac_tx_frame_ctrl_xfer++;
782 		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
783 			x->mac_tx_frame_ctrl_pause++;
784 		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
785 			x->mac_tx_frame_ctrl_wait++;
786 		else
787 			x->mac_tx_frame_ctrl_idle++;
788 	}
789 	if (value & GMAC_DEBUG_TPESTS)
790 		x->mac_gmii_tx_proto_engine++;
791 	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
792 		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
793 					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
794 	if (value & GMAC_DEBUG_RPESTS)
795 		x->mac_gmii_rx_proto_engine++;
796 }
797 
798 static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
799 {
800 	u32 value = readl(ioaddr + GMAC_CONFIG);
801 
802 	if (enable)
803 		value |= GMAC_CONFIG_LM;
804 	else
805 		value &= ~GMAC_CONFIG_LM;
806 
807 	writel(value, ioaddr + GMAC_CONFIG);
808 }
809 
810 static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
811 {
812 	u32 value = readl(ioaddr + GMAC_CONFIG);
813 
814 	value &= ~GMAC_CONFIG_SARC;
815 	value |= val << GMAC_CONFIG_SARC_SHIFT;
816 
817 	writel(value, ioaddr + GMAC_CONFIG);
818 }
819 
820 static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
821 				   u32 addr)
822 {
823 	void __iomem *ioaddr = hw->pcsr;
824 	u32 value;
825 
826 	writel(addr, ioaddr + GMAC_ARP_ADDR);
827 
828 	value = readl(ioaddr + GMAC_CONFIG);
829 	if (en)
830 		value |= GMAC_CONFIG_ARPEN;
831 	else
832 		value &= ~GMAC_CONFIG_ARPEN;
833 	writel(value, ioaddr + GMAC_CONFIG);
834 }
835 
/* Configure L3 (IP address) filter slot @filter_no.
 *
 * @ipv6: match IPv6 instead of IPv4; @sa: match the source address
 * (otherwise destination); @inv: invert the match; @match: address value.
 * Disabling (@en false) clears the whole control register. Always
 * returns 0.
 */
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Globally enable L3/L4 filtering */
	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		/* NOTE(review): unlike the IPv6 branch, stale SAM/DAM/
		 * inverse bits from a previous configuration are not cleared
		 * here before OR-ing in the new ones — confirm whether that
		 * is intentional (IPv4 allows SA and DA simultaneously).
		 */
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* ADDR0 holds the source match, ADDR1 the destination match */
	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}
889 
890 static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
891 				   bool en, bool udp, bool sa, bool inv,
892 				   u32 match)
893 {
894 	void __iomem *ioaddr = hw->pcsr;
895 	u32 value;
896 
897 	value = readl(ioaddr + GMAC_PACKET_FILTER);
898 	value |= GMAC_PACKET_FILTER_IPFE;
899 	writel(value, ioaddr + GMAC_PACKET_FILTER);
900 
901 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
902 	if (udp) {
903 		value |= GMAC_L4PEN0;
904 	} else {
905 		value &= ~GMAC_L4PEN0;
906 	}
907 
908 	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
909 	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
910 	if (sa) {
911 		value |= GMAC_L4SPM0;
912 		if (inv)
913 			value |= GMAC_L4SPIM0;
914 	} else {
915 		value |= GMAC_L4DPM0;
916 		if (inv)
917 			value |= GMAC_L4DPIM0;
918 	}
919 
920 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
921 
922 	if (sa) {
923 		value = match & GMAC_L4SP0;
924 	} else {
925 		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
926 	}
927 
928 	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
929 
930 	if (!en)
931 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
932 
933 	return 0;
934 }
935 
/* MAC callbacks for the original DWMAC 4.00 core */
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
};
970 
/* MAC callbacks for DWMAC 4.10: uses the dwmac4-specific MAC enable helper
 * and additionally wires up flexible PPS and frame-preemption mapping.
 */
const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};
1007 
/* MAC callbacks for DWMAC 5.10: extends the 4.10 set with the dwmac5
 * safety features and the flexible RX parser.
 */
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};
1048 
/* Populate the mac_device_info for a DWMAC4/5 core: register window,
 * filtering capacity, phylink capabilities, speed bit encodings and the
 * MDIO register field layout. Always returns 0.
 */
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	/* mcast_bits_log2 stays 0 when no hash bins are configured */
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	mac->link.duplex = GMAC_CONFIG_DM;
	/* GMAC_CONFIG PS/FES encodings for each speed; 1000 Mb/s is both
	 * bits clear, which is why speed_mask covers PS and FES.
	 */
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	/* MDIO address/data register field layout for this core */
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);

	return 0;
}
1084