// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */
6
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_fpe.h"
12 #include "stmmac_ptp.h"
13 #include "stmmac_vlan.h"
14 #include "dwxlgmac2.h"
15 #include "dwxgmac2.h"
16
dwxgmac2_core_init(struct mac_device_info * hw,struct net_device * dev)17 static void dwxgmac2_core_init(struct mac_device_info *hw,
18 struct net_device *dev)
19 {
20 void __iomem *ioaddr = hw->pcsr;
21 u32 tx, rx;
22
23 tx = readl(ioaddr + XGMAC_TX_CONFIG);
24 rx = readl(ioaddr + XGMAC_RX_CONFIG);
25
26 tx |= XGMAC_CORE_INIT_TX;
27 rx |= XGMAC_CORE_INIT_RX;
28
29 if (hw->ps) {
30 tx |= XGMAC_CONFIG_TE;
31 tx &= ~hw->link.speed_mask;
32
33 switch (hw->ps) {
34 case SPEED_10000:
35 tx |= hw->link.xgmii.speed10000;
36 break;
37 case SPEED_2500:
38 tx |= hw->link.speed2500;
39 break;
40 case SPEED_1000:
41 default:
42 tx |= hw->link.speed1000;
43 break;
44 }
45 }
46
47 writel(tx, ioaddr + XGMAC_TX_CONFIG);
48 writel(rx, ioaddr + XGMAC_RX_CONFIG);
49 writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
50 }
51
/* Enable or disable both the transmitter (TE) and receiver (RE). */
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx_cfg = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx_cfg |= XGMAC_CONFIG_TE;
		rx_cfg |= XGMAC_CONFIG_RE;
	} else {
		tx_cfg &= ~XGMAC_CONFIG_TE;
		rx_cfg &= ~XGMAC_CONFIG_RE;
	}

	writel(tx_cfg, ioaddr + XGMAC_TX_CONFIG);
	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
68
dwxgmac2_rx_ipc(struct mac_device_info * hw)69 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
70 {
71 void __iomem *ioaddr = hw->pcsr;
72 u32 value;
73
74 value = readl(ioaddr + XGMAC_RX_CONFIG);
75 if (hw->rx_csum)
76 value |= XGMAC_CONFIG_IPC;
77 else
78 value &= ~XGMAC_CONFIG_IPC;
79 writel(value, ioaddr + XGMAC_RX_CONFIG);
80
81 return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
82 }
83
/* Enable an RX queue in AVB (0x1) or DCB (0x2) mode; any other mode
 * leaves the queue disabled.
 */
static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	ctrl = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		ctrl |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
		break;
	case MTL_QUEUE_DCB:
		ctrl |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
		break;
	default:
		/* queue stays disabled */
		break;
	}

	writel(ctrl, ioaddr + XGMAC_RXQ_CTRL0);
}
97
/* Map the priority bitmap @prio to RX @queue. Queues 0-3 live in
 * RXQ_CTRL2, queues 4-7 in RXQ_CTRL3.
 */
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl2, ctrl3, clear = 0;
	int i;

	ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);

	/* The same priority must not be mapped to multiple RX queues, so
	 * compute a mask that removes @prio from every queue field.
	 */
	for (i = 0; i < 4; i++)
		clear |= (prio << XGMAC_PSRQ_SHIFT(i)) & XGMAC_PSRQ(i);

	ctrl2 &= ~clear;
	ctrl3 &= ~clear;

	/* Write the register holding the target queue first, so the new
	 * assignment lands before the old ones are cleared.
	 */
	if (queue < 4) {
		ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
			 XGMAC_PSRQ(queue);
		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
	} else {
		queue -= 4;
		ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
			 XGMAC_PSRQ(queue);
		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
	}
}
138
/* Map the priority bitmap @prio to TX traffic class @queue. */
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg, val;

	/* Classes 0-3 live in PRTY_MAP0, classes 4-7 in PRTY_MAP1 */
	if (queue < 4) {
		reg = XGMAC_TC_PRTY_MAP0;
	} else {
		reg = XGMAC_TC_PRTY_MAP1;
		queue -= 4;
	}

	val = readl(ioaddr + reg);
	val &= ~XGMAC_PSTC(queue);
	val |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
	writel(val, ioaddr + reg);
}
155
/* Route a special packet type (AV, PTP, DCB control, untagged, MC/BC)
 * to RX @queue.
 */
static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
				      u8 packet, u32 queue)
{
	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
	};
	const struct stmmac_rx_routing *route;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* NOTE(review): @packet is used as a 1-based table index; callers
	 * must never pass 0 or a value past the table — confirm call sites.
	 */
	route = &dwxgmac2_route_possibilities[packet - 1];

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
	value &= ~route->reg_mask;
	value |= (queue << route->reg_shift) & route->reg_mask;

	/* AV and MC/BC routing need an additional enable bit */
	if (packet == PACKET_AVCPQ)
		value |= FIELD_PREP(XGMAC_TACPQE, 1);
	else if (packet == PACKET_MCBCQ)
		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);

	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
}
185
/* Select the MTL RX arbitration algorithm: RAA cleared is strict
 * priority (SP), RAA set is weighted strict priority (WSP).
 */
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 opmode;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_RAA;
	if (rx_alg == MTL_RX_ALGORITHM_WSP)
		opmode |= XGMAC_RAA;

	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);
}
207
/* Select the MTL TX scheduling algorithm and program the per-TC
 * transmission selection algorithm (ETS for the weighted algorithms).
 */
static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool use_ets = true;
	u32 value;
	int tc;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	if (tx_alg == MTL_TX_ALGORITHM_WRR)
		value |= XGMAC_WRR;
	else if (tx_alg == MTL_TX_ALGORITHM_WFQ)
		value |= XGMAC_WFQ;
	else if (tx_alg == MTL_TX_ALGORITHM_DWRR)
		value |= XGMAC_DWRR;
	else
		use_ets = false;

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Enable ETS per traffic class only for the weighted algorithms */
	for (tc = 0; tc < MTL_MAX_TX_QUEUES; tc++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(tc));
		value &= ~XGMAC_TSA;
		if (use_ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(tc));
	}
}
245
/* Program the quantum/weight register for TX traffic class @queue. */
static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					     struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	writel(weight, hw->pcsr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
254
/* Map MTL RX @queue to DMA channel @chan (XGMAC_QDDMACH selects
 * dynamic mapping).
 */
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg, val;

	/* Queues 0-3 live in DMA_MAP0, queues 4-7 in DMA_MAP1 */
	if (queue < 4) {
		reg = XGMAC_MTL_RXQ_DMA_MAP0;
	} else {
		reg = XGMAC_MTL_RXQ_DMA_MAP1;
		queue -= 4;
	}

	val = readl(ioaddr + reg);
	val &= ~XGMAC_QxMDMACH(queue);
	val |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
	writel(val, ioaddr + reg);
}
271
/* Program the Credit Based Shaper parameters for traffic class @queue
 * and select CBS as its transmission selection algorithm.
 */
static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
				struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	ctrl &= ~XGMAC_TSA;
	ctrl |= XGMAC_CC | XGMAC_CBS;
	writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
290
/* Snapshot the whole MAC register window into @reg_space (ethtool). */
static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int idx;

	for (idx = 0; idx < XGMAC_MAC_REGSIZE; idx++)
		reg_space[idx] = readl(ioaddr + idx * 4);
}
299
/* Handle MAC-level interrupt sources (PMT wake-up and LPI transitions).
 *
 * Updates the counters in @x and returns a CORE_IRQ_* bitmask for the
 * TX LPI enter/exit events; the other events only bump counters.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Only act on sources that are actually enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* Read PMT to acknowledge the wake-up event; value unused */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		/* Reading LPI_CTRL reports which LPI transition occurred */
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
336
/* Handle the per-queue MTL interrupt for @chan; reports RX overflow. */
static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
					struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 status, chan_status;
	int ret = 0;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (!(status & BIT(chan)))
		return ret;

	chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	if (chan_status & XGMAC_RXOVFIS)
		ret |= CORE_IRQ_MTL_RX_OVERFLOW;

	/* Clear every pending bit for this queue */
	writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));

	return ret;
}
356
/* Program RX and per-queue TX flow control. The pause time is only
 * programmed in full duplex.
 */
static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 q;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);

	if (!(fc & FLOW_TX))
		return;

	for (q = 0; q < tx_cnt; q++) {
		u32 val = XGMAC_TFE;

		if (duplex)
			val |= pause_time << XGMAC_PT_SHIFT;

		writel(val, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(q));
	}
}
377
dwxgmac2_pmt(struct mac_device_info * hw,unsigned long mode)378 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
379 {
380 void __iomem *ioaddr = hw->pcsr;
381 u32 val = 0x0;
382
383 if (mode & WAKE_MAGIC)
384 val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
385 if (mode & WAKE_UCAST)
386 val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
387 if (val) {
388 u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
389 cfg |= XGMAC_CONFIG_RE;
390 writel(cfg, ioaddr + XGMAC_RX_CONFIG);
391 }
392
393 writel(val, ioaddr + XGMAC_PMT);
394 }
395
dwxgmac2_set_umac_addr(struct mac_device_info * hw,const unsigned char * addr,unsigned int reg_n)396 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
397 const unsigned char *addr,
398 unsigned int reg_n)
399 {
400 void __iomem *ioaddr = hw->pcsr;
401 u32 value;
402
403 value = (addr[5] << 8) | addr[4];
404 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
405
406 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
407 writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
408 }
409
dwxgmac2_get_umac_addr(struct mac_device_info * hw,unsigned char * addr,unsigned int reg_n)410 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
411 unsigned char *addr, unsigned int reg_n)
412 {
413 void __iomem *ioaddr = hw->pcsr;
414 u32 hi_addr, lo_addr;
415
416 /* Read the MAC address from the hardware */
417 hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
418 lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
419
420 /* Extract the MAC address from the high and low words */
421 addr[0] = lo_addr & 0xff;
422 addr[1] = (lo_addr >> 8) & 0xff;
423 addr[2] = (lo_addr >> 16) & 0xff;
424 addr[3] = (lo_addr >> 24) & 0xff;
425 addr[4] = hi_addr & 0xff;
426 addr[5] = (hi_addr >> 8) & 0xff;
427 }
428
/* Configure the LPI (EEE) entry mode.
 *
 * Only forced and disabled modes are handled; hardware-timed entry
 * (STMMAC_LPI_TIMER) is rejected with -EOPNOTSUPP.
 */
static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
				 enum stmmac_lpi_mode mode,
				 bool en_tx_lpi_clockgating, u32 et)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	if (mode == STMMAC_LPI_TIMER)
		return -EOPNOTSUPP;

	ctrl = readl(ioaddr + XGMAC_LPI_CTRL);
	if (mode == STMMAC_LPI_FORCED) {
		ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
		if (en_tx_lpi_clockgating)
			ctrl |= LPI_CTRL_STATUS_LPITCSE;
	} else {
		ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
			  LPI_CTRL_STATUS_LPITCSE);
	}
	writel(ctrl, ioaddr + XGMAC_LPI_CTRL);

	return 0;
}
452
dwxgmac2_set_eee_pls(struct mac_device_info * hw,int link)453 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
454 {
455 void __iomem *ioaddr = hw->pcsr;
456 u32 value;
457
458 value = readl(ioaddr + XGMAC_LPI_CTRL);
459 if (link)
460 value |= LPI_CTRL_STATUS_PLS;
461 else
462 value &= ~LPI_CTRL_STATUS_PLS;
463 writel(value, ioaddr + XGMAC_LPI_CTRL);
464 }
465
dwxgmac2_set_eee_timer(struct mac_device_info * hw,int ls,int tw)466 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
467 {
468 void __iomem *ioaddr = hw->pcsr;
469 u32 value;
470
471 value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
472 writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
473 }
474
/* Write the multicast hash table.
 *
 * The hash width selects the number of 32-bit hash registers:
 * 64 bits -> 2, 128 bits -> 4, 256 bits -> 8. Any other width is
 * silently ignored.
 */
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int nregs, i;

	if (mcbitslog2 == 6)
		nregs = 2;
	else if (mcbitslog2 == 7)
		nregs = 4;
	else if (mcbitslog2 == 8)
		nregs = 8;
	else
		return;

	for (i = 0; i < nregs; i++)
		writel(mcfilterbits[i], ioaddr + XGMAC_HASH_TABLE(i));
}
497
/* Program the RX packet filter from the net_device flags and address
 * lists: promiscuous, all-multicast, multicast hash filtering, and
 * unicast perfect filtering (falling back to promiscuous when the
 * unicast list overflows the available entries).
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: pass everything, including control frames */
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Too many multicast addresses for the hash bins: pass all
		 * multicast and saturate the hash table.
		 */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash index: top mcbitslog2 bits of the
			 * bit-reversed CRC32 of the MAC address.
			 */
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	/* mc_filter is all-zero here unless the hash branch ran, so this
	 * also clears stale hash entries in the other filter modes.
	 */
	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		/* starts at 1 — entry 0 is presumably the device's own
		 * address; confirm against the probe path
		 */
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Clear the remaining (stale) perfect-filter entries */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
555
/* Toggle MAC-level loopback (LM bit in the RX configuration). */
static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		cfg |= XGMAC_CONFIG_LM;
	else
		cfg &= ~XGMAC_CONFIG_LM;

	writel(cfg, ioaddr + XGMAC_RX_CONFIG);
}
567
/* Write one word of the RSS key (@is_key) or indirection table through
 * the indirect DATA/ADDR interface, then poll until the operation-busy
 * bit clears.
 */
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl;

	writel(val, ioaddr + XGMAC_RSS_DATA);

	ctrl = (idx << XGMAC_RSSIA_SHIFT) | XGMAC_OB;
	if (is_key)
		ctrl |= XGMAC_ADDRT;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}
582
/* Enable/disable RSS: program the hash key and indirection table, route
 * all RX queues through dynamic DMA mapping, then switch RSS on.
 *
 * Returns 0 on success or the error from the indirect register writes.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 *key, value;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);

	/* No (enabled) configuration: just turn RSS off */
	if (!cfg || !cfg->enable) {
		writel(value & ~XGMAC_RSSE, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* Program the hash key one 32-bit word at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	/* Program the indirection table */
	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	/* Let the RSS result pick the DMA channel for each RX queue */
	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
617
/* Description of one bit position in a safety interrupt status register */
struct dwxgmac3_error_desc {
	bool valid;			/* false for reserved/unknown bits */
	const char *desc;		/* short mnemonic */
	const char *detailed_desc;	/* human-readable description */
};

/* Byte offset of a counter array inside struct stmmac_safety_stats */
#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
625
dwxgmac3_log_error(struct net_device * ndev,u32 value,bool corr,const char * module_name,const struct dwxgmac3_error_desc * desc,unsigned long field_offset,struct stmmac_safety_stats * stats)626 static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
627 const char *module_name,
628 const struct dwxgmac3_error_desc *desc,
629 unsigned long field_offset,
630 struct stmmac_safety_stats *stats)
631 {
632 unsigned long loc, mask;
633 u8 *bptr = (u8 *)stats;
634 unsigned long *ptr;
635
636 ptr = (unsigned long *)(bptr + field_offset);
637
638 mask = value;
639 for_each_set_bit(loc, &mask, 32) {
640 netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
641 "correctable" : "uncorrectable", module_name,
642 desc[loc].desc, desc[loc].detailed_desc);
643
644 /* Update counters */
645 ptr[loc]++;
646 }
647 }
648
/* Bit descriptions for XGMAC_MAC_DPP_FSM_INT_STATUS, indexed by bit */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
683
/* Read, acknowledge and log the MAC DPP/FSM safety errors. */
static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	/* Write the read value back to acknowledge the reported events */
	status = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(status, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}
696
/* Bit descriptions for XGMAC_MTL_ECC_INT_STATUS, indexed by bit */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
731
/* Read, acknowledge and log the MTL ECC safety errors. */
static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	/* Write the read value back to acknowledge the reported events */
	status = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(status, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}
744
/* Bit descriptions for XGMAC_DMA_ECC_INT_STATUS, indexed by bit */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
779
/* Bit descriptions for XGMAC_DMA_DPP_INT_STATUS: one TX and one RX
 * descriptor parity error flag per DMA channel (0-15).
 */
static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
	{ true, "TDPES0", dpp_tx_err },
	{ true, "TDPES1", dpp_tx_err },
	{ true, "TDPES2", dpp_tx_err },
	{ true, "TDPES3", dpp_tx_err },
	{ true, "TDPES4", dpp_tx_err },
	{ true, "TDPES5", dpp_tx_err },
	{ true, "TDPES6", dpp_tx_err },
	{ true, "TDPES7", dpp_tx_err },
	{ true, "TDPES8", dpp_tx_err },
	{ true, "TDPES9", dpp_tx_err },
	{ true, "TDPES10", dpp_tx_err },
	{ true, "TDPES11", dpp_tx_err },
	{ true, "TDPES12", dpp_tx_err },
	{ true, "TDPES13", dpp_tx_err },
	{ true, "TDPES14", dpp_tx_err },
	{ true, "TDPES15", dpp_tx_err },
	{ true, "RDPES0", dpp_rx_err },
	{ true, "RDPES1", dpp_rx_err },
	{ true, "RDPES2", dpp_rx_err },
	{ true, "RDPES3", dpp_rx_err },
	{ true, "RDPES4", dpp_rx_err },
	{ true, "RDPES5", dpp_rx_err },
	{ true, "RDPES6", dpp_rx_err },
	{ true, "RDPES7", dpp_rx_err },
	{ true, "RDPES8", dpp_rx_err },
	{ true, "RDPES9", dpp_rx_err },
	{ true, "RDPES10", dpp_rx_err },
	{ true, "RDPES11", dpp_rx_err },
	{ true, "RDPES12", dpp_rx_err },
	{ true, "RDPES13", dpp_rx_err },
	{ true, "RDPES14", dpp_rx_err },
	{ true, "RDPES15", dpp_rx_err },
};
816
/* Read, acknowledge and log the DMA ECC errors, then do the same for
 * the DMA descriptor parity (DPP) errors, which are always treated as
 * uncorrectable.
 */
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	status = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(status, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);

	status = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
	writel(status, ioaddr + XGMAC_DMA_DPP_INT_STATUS);

	dwxgmac3_log_error(ndev, status, false, "DMA_DPP",
			   dwxgmac3_dma_dpp_errors,
			   STAT_OFF(dma_dpp_errors), stats);
}
836
837 static int
dwxgmac3_safety_feat_config(void __iomem * ioaddr,unsigned int asp,struct stmmac_safety_feature_cfg * safety_cfg)838 dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
839 struct stmmac_safety_feature_cfg *safety_cfg)
840 {
841 u32 value;
842
843 if (!asp)
844 return -EINVAL;
845
846 /* 1. Enable Safety Features */
847 writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
848
849 /* 2. Enable MTL Safety Interrupts */
850 value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
851 value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
852 value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
853 value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
854 value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
855 writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
856
857 /* 3. Enable DMA Safety Interrupts */
858 value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
859 value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
860 value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
861 writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
862
863 /* 0x2: Without ECC or Parity Ports on External Application Interface
864 * 0x4: Only ECC Protection for External Memory feature is selected
865 */
866 if (asp == 0x2 || asp == 0x4)
867 return 0;
868
869 /* 4. Enable Parity and Timeout for FSM */
870 value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
871 value |= XGMAC_PRTYEN; /* FSM Parity Feature */
872 value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
873 writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
874
875 /* 5. Enable Data Path Parity Protection */
876 value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
877 /* already enabled by default, explicit enable it again */
878 value &= ~XGMAC_DPP_DISABLE;
879 writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
880
881 return 0;
882 }
883
/* Demultiplex the safety interrupt into MAC, MTL and DMA handlers.
 *
 * Returns -EINVAL when no safety package is present, otherwise a
 * non-zero value when at least one uncorrectable error was handled.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* MTL errors: correctable only when just the *CIS bits are set */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
	 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
	 * Parity Errors here
	 */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
927
/* Module order must match dwxgmac3_safety_feat_dump(), which selects a
 * table with index / 32.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
	{ dwxgmac3_dma_dpp_errors },
};
936
dwxgmac3_safety_feat_dump(struct stmmac_safety_stats * stats,int index,unsigned long * count,const char ** desc)937 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
938 int index, unsigned long *count,
939 const char **desc)
940 {
941 int module = index / 32, offset = index % 32;
942 unsigned long *ptr = (unsigned long *)stats;
943
944 if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
945 return -EINVAL;
946 if (!dwxgmac3_all_errors[module].desc[offset].valid)
947 return -EINVAL;
948 if (count)
949 *count = *(ptr + index);
950 if (desc)
951 *desc = dwxgmac3_all_errors[module].desc[offset].desc;
952 return 0;
953 }
954
/* Turn the Flexible RX Parser off. Always succeeds (returns 0); the
 * int return matches the rxp_disable ops prototype.
 */
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 opmode;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_FRPE;
	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}
964
/* Turn the Flexible RX Parser back on */
static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	writel(opmode | XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);
}
973
/* Program one parser instruction (entry->val) into slot @pos of the
 * Flexible RX Parser instruction memory via the indirect access
 * registers.
 *
 * Each entry spans several 32-bit words; for every word: wait for the
 * engine to be idle, stage the data word, program the word address,
 * select the write operation, kick the transfer, then wait for
 * completion.  The register write order below is mandated by the
 * indirect-access protocol - do not reorder.
 *
 * Returns 0 on success or a negative errno on poll timeout.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		/* Absolute word index into the instruction memory */
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
1015
1016 static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry * entries,unsigned int count,u32 curr_prio)1017 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1018 unsigned int count, u32 curr_prio)
1019 {
1020 struct stmmac_tc_entry *entry;
1021 u32 min_prio = ~0x0;
1022 int i, min_prio_idx;
1023 bool found = false;
1024
1025 for (i = count - 1; i >= 0; i--) {
1026 entry = &entries[i];
1027
1028 /* Do not update unused entries */
1029 if (!entry->in_use)
1030 continue;
1031 /* Do not update already updated entries (i.e. fragments) */
1032 if (entry->in_hw)
1033 continue;
1034 /* Let last entry be updated last */
1035 if (entry->is_last)
1036 continue;
1037 /* Do not return fragments */
1038 if (entry->is_frag)
1039 continue;
1040 /* Check if we already checked this prio */
1041 if (entry->prio < curr_prio)
1042 continue;
1043 /* Check if this is the minimum prio */
1044 if (entry->prio < min_prio) {
1045 min_prio = entry->prio;
1046 min_prio_idx = i;
1047 found = true;
1048 }
1049 }
1050
1051 if (found)
1052 return &entries[min_prio_idx];
1053 return NULL;
1054 }
1055
/* Download a complete rule set into the Flexible RX Parser.
 *
 * The receiver and the parser are switched off for the duration of the
 * update so the hardware never matches frames against a half-written
 * table; RX is unconditionally restored on exit.  Returns 0 on success
 * (including the nve == 0 case, which leaves the parser disabled) or a
 * negative errno on indirect-access timeout.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			/* jump past this entry and its fragment slot */
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		/* Program the fragment continuation right behind its owner */
		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1143
/* Fetch the captured TX timestamp (in nanoseconds) into *ts.
 * Returns -EBUSY if no timestamp becomes available within the poll
 * window, 0 on success.  NSEC must be read before SEC (register
 * access order preserved from the original).
 */
static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 status;
	u64 ns;

	/* Wait for the TX timestamp captured flag */
	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      status, status & XGMAC_TXTSC,
				      100, 10000))
		return -EBUSY;

	ns = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	ns += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	*ts = ns;

	return 0;
}
1157
/* Configure one flexible PPS (pulse-per-second) output.
 *
 * With @enable false the output is stopped; otherwise start time,
 * interval and pulse width (50% duty cycle) are programmed and the
 * START command is latched by the final XGMAC_PPS_CONTROL write.
 * Returns 0 on success, -EBUSY while a previously programmed target
 * time is still pending, -EINVAL on bad arguments or a period shorter
 * than two sub-second ticks.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	/* Hardware has not consumed the previous target time yet */
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);

	/* XGMAC Core has 4 PPS outputs at most.
	 *
	 * Prior to XGMAC Core 3.20, Fixed mode or Flexible mode is
	 * selectable for PPS0 only via PPSEN0.  PPS{1,2,3} are in Flexible
	 * mode by default and cannot be switched to Fixed mode, since
	 * PPSEN{1,2,3} are read-only, reserved to 0; setting them anyway
	 * is harmless there.
	 *
	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and
	 * must be set, or the PPS outputs stay in Fixed PPS mode by
	 * default.
	 */
	val |= XGMAC_PPSENx(index);

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	/* In binary rollover mode (TSCTRLSSR clear) one sub-second LSB is
	 * ~0.465 ns, so rescale the nanosecond start time accordingly.
	 */
	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	/* Express the period in units of the sub-second increment */
	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* Pulse width = half the interval (50% duty cycle) */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1223
/* Program the Source Address insertion/replacement control field */
static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 tx_cfg = readl(ioaddr + XGMAC_TX_CONFIG);

	tx_cfg &= ~XGMAC_CONFIG_SARC;
	tx_cfg |= val << XGMAC_CONFIG_SARC_SHIFT;
	writel(tx_cfg, ioaddr + XGMAC_TX_CONFIG);
}
1233
dwxgmac2_filter_wait(struct mac_device_info * hw)1234 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1235 {
1236 void __iomem *ioaddr = hw->pcsr;
1237 u32 value;
1238
1239 if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1240 !(value & XGMAC_XB), 100, 10000))
1241 return -EBUSY;
1242 return 0;
1243 }
1244
/* Indirectly read register @reg of L3/L4 filter @filter_no into *data.
 * Returns 0 on success or -EBUSY if the engine never goes idle.
 */
static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;
	int ret;

	/* No indirect access may be in flight before we start */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	/* Issue a read (TT set) and kick it off (XB set) */
	ctrl = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	ctrl |= XGMAC_TT | XGMAC_XB;
	writel(ctrl, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	/* The data register is only valid once the engine is idle again */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}
1267
/* Indirectly write @data to register @reg of L3/L4 filter @filter_no.
 * Returns 0 on success or -EBUSY if the engine never goes idle.
 */
static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;
	int ret;

	/* No indirect access may be in flight before we start */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	/* Stage the payload, then issue a write (TT clear, XB set) */
	writel(data, ioaddr + XGMAC_L3L4_DATA);

	ctrl = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	ctrl |= XGMAC_XB;
	writel(ctrl, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}
1287
/* Configure L3 (IP address) filter @filter_no.
 *
 * @en:    enable the filter; when false the control word is zeroed
 * @ipv6:  match IPv6 instead of IPv4
 * @sa:    match the source address (otherwise the destination)
 * @inv:   invert the match
 * @match: address (or IPv6 word) to match, written to L3_ADDR0/1
 *
 * Returns 0 on success or a negative errno from the indirect access.
 *
 * Fix: previously only the IPv6 path cleared the old SA/DA match and
 * invert bits before setting new ones, so reconfiguring an IPv4 filter
 * (e.g. from SA to DA, or toggling inversion) left stale bits enabled.
 * The clear is now done unconditionally, which also removes the
 * duplicated sa/inv code in the two branches.
 */
static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP (L3/L4) filtering */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* Start from a clean SA/DA match configuration */
	value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
	value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6)
		value |= XGMAC_L3PEN0;
	else
		value &= ~XGMAC_L3PEN0;

	if (sa) {
		value |= XGMAC_L3SAM0;
		if (inv)
			value |= XGMAC_L3SAIM0;
	} else {
		value |= XGMAC_L3DAM0;
		if (inv)
			value |= XGMAC_L3DAIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* SA match value lives in L3_ADDR0, DA match in L3_ADDR1 */
	ret = dwxgmac2_filter_write(hw, filter_no,
				    sa ? XGMAC_L3_ADDR0 : XGMAC_L3_ADDR1,
				    match);
	if (ret)
		return ret;

	/* Disabling zeroes the whole control word */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1350
/* Configure L4 (TCP/UDP port) filter @filter_no.
 *
 * @en:    enable the filter; when false the control word is zeroed
 * @udp:   match UDP instead of TCP
 * @sa:    match the source port (otherwise the destination port)
 * @inv:   invert the match
 * @match: port number to match
 *
 * Returns 0 on success or a negative errno from the indirect access.
 */
static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl, port;
	int ret;

	/* Globally enable IP (L3/L4) filtering */
	ctrl = readl(ioaddr + XGMAC_PACKET_FILTER);
	ctrl |= XGMAC_FILTER_IPFE;
	writel(ctrl, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &ctrl);
	if (ret)
		return ret;

	/* Select UDP vs TCP port matching */
	if (udp)
		ctrl |= XGMAC_L4PEN0;
	else
		ctrl &= ~XGMAC_L4PEN0;

	/* Start from a clean source/destination port match configuration */
	ctrl &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	ctrl &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		ctrl |= XGMAC_L4SPM0;
		if (inv)
			ctrl |= XGMAC_L4SPIM0;
	} else {
		ctrl |= XGMAC_L4DPM0;
		if (inv)
			ctrl |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, ctrl);
	if (ret)
		return ret;

	/* Source port sits in the low half of L4_ADDR, destination high */
	if (sa)
		port = match & XGMAC_L4SP0;
	else
		port = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, port);
	if (ret)
		return ret;

	/* Disabling zeroes the whole control word */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1408
/* Enable/disable hardware ARP offload for IPv4 address @addr */
static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 rx_cfg;

	/* Address the MAC should answer ARP requests for */
	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		rx_cfg |= XGMAC_CONFIG_ARPEN;
	else
		rx_cfg &= ~XGMAC_CONFIG_ARPEN;
	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
1424
/* stmmac MAC callbacks for the DesignWare XGMAC 2.10 core */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_lpi_mode = dwxgmac2_set_lpi_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
1464
/* Enable RX queue @queue in AV or DCB mode (XLGMAC register layout);
 * any other mode leaves the queue disabled.
 */
static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				      u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	ctrl = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
	ctrl &= ~XGMAC_RXQEN(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		ctrl |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
		break;
	case MTL_QUEUE_DCB:
		ctrl |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
		break;
	default:
		/* queue stays disabled */
		break;
	}

	writel(ctrl, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
}
1478
/* stmmac MAC callbacks for the DesignWare XLGMAC core; identical to
 * dwxgmac210_ops except for the XLGMAC-specific rx_queue_enable.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_lpi_mode = dwxgmac2_set_lpi_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
1518
dwxgmac2_setup(struct stmmac_priv * priv)1519 int dwxgmac2_setup(struct stmmac_priv *priv)
1520 {
1521 struct mac_device_info *mac = priv->hw;
1522
1523 dev_info(priv->device, "\tXGMAC2\n");
1524
1525 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1526 mac->pcsr = priv->ioaddr;
1527 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1528 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1529 mac->mcast_bits_log2 = 0;
1530
1531 if (mac->multicast_filter_bins)
1532 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1533
1534 mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1535 MAC_1000FD | MAC_2500FD | MAC_5000FD |
1536 MAC_10000FD;
1537 mac->link.duplex = 0;
1538 mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
1539 mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
1540 mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
1541 mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
1542 mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
1543 mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
1544 mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
1545 mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
1546
1547 mac->mii.addr = XGMAC_MDIO_ADDR;
1548 mac->mii.data = XGMAC_MDIO_DATA;
1549 mac->mii.addr_shift = 16;
1550 mac->mii.addr_mask = GENMASK(20, 16);
1551 mac->mii.reg_shift = 0;
1552 mac->mii.reg_mask = GENMASK(15, 0);
1553 mac->mii.clk_csr_shift = 19;
1554 mac->mii.clk_csr_mask = GENMASK(21, 19);
1555 mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
1556
1557 return 0;
1558 }
1559
dwxlgmac2_setup(struct stmmac_priv * priv)1560 int dwxlgmac2_setup(struct stmmac_priv *priv)
1561 {
1562 struct mac_device_info *mac = priv->hw;
1563
1564 dev_info(priv->device, "\tXLGMAC\n");
1565
1566 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1567 mac->pcsr = priv->ioaddr;
1568 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1569 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1570 mac->mcast_bits_log2 = 0;
1571
1572 if (mac->multicast_filter_bins)
1573 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1574
1575 mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1576 MAC_1000FD | MAC_2500FD | MAC_5000FD |
1577 MAC_10000FD | MAC_25000FD |
1578 MAC_40000FD | MAC_50000FD |
1579 MAC_100000FD;
1580 mac->link.duplex = 0;
1581 mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
1582 mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
1583 mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
1584 mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
1585 mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
1586 mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
1587 mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
1588 mac->link.speed_mask = XLGMAC_CONFIG_SS;
1589
1590 mac->mii.addr = XGMAC_MDIO_ADDR;
1591 mac->mii.data = XGMAC_MDIO_DATA;
1592 mac->mii.addr_shift = 16;
1593 mac->mii.addr_mask = GENMASK(20, 16);
1594 mac->mii.reg_shift = 0;
1595 mac->mii.reg_mask = GENMASK(15, 0);
1596 mac->mii.clk_csr_shift = 19;
1597 mac->mii.clk_csr_mask = GENMASK(21, 19);
1598
1599 return 0;
1600 }
1601