// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);
	unsigned long clk_rate;

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
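	/* For example, a 250 MHz CSR clock is programmed as 250 - 1 = 249. */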
	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);

	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}

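/* Half-duplex modes are not supported when more than one TX queue is in use */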
static void dwmac4_update_caps(struct stmmac_priv *priv)
{
	if (priv->plat->tx_queues_to_use > 1)
		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}

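/* Enable an RX queue and route it to either the AV or the DCB traffic path */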
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

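/* Map the priorities in 'prio' to an RX queue: queues 0-3 are programmed
 * through MAC_RxQ_Ctrl2 and queues 4-7 through MAC_RxQ_Ctrl3.
 */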
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);

	/* The software must ensure that the same priority
	 * is not mapped to multiple Rx queues
	 */
	for (i = 0; i < 4; i++)
		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
				GMAC_RXQCTRL_PSRQX_MASK(i));

	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* First assign the new priorities to a queue, then
	 * clear them from the other queues
	 */
	if (queue < 4) {
		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
				GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
	} else {
		queue -= 4;

		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
				GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
	}
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

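/* Route a special packet type (AV control, PTP, DCB control, untagged or
 * multicast/broadcast) to the given RX queue via MAC_RxQ_Ctrl1.
 */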
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					   struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
							     queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
}

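/* Map MTL RX queue 'queue' onto DMA channel 'chan': queues 0-3 live in
 * MTL_RXQ_DMA_MAP0 and queues 4-7 in MTL_RXQ_DMA_MAP1.
 */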
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4) {
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	} else {
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
	}
}

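/* Program the Credit Based Shaper (IEEE 802.1Qav) parameters of an AVB queue:
 * send/idle slopes and high/low credits.
 */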
static void dwmac4_config_cbs(struct stmmac_priv *priv,
			      struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));

	/* configure send slope */
	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));

	/* configure low credit */
	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

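/* Select the LPI mode: disabled, forced, or timer based. In timer mode a
 * non-zero entry timer 'et' arms automatic LPI entry.
 */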
static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
			       enum stmmac_lpi_mode mode,
			       bool en_tx_lpi_clockgating, u32 et)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, mask;

	if (mode == STMMAC_LPI_DISABLE) {
		value = 0;
	} else {
		value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;

		if (mode == STMMAC_LPI_TIMER) {
			/* Return ERANGE if the timer is larger than the
			 * register field.
			 */
			if (et > STMMAC_ET_MAX)
				return -ERANGE;

			/* Set the hardware LPI entry timer */
			writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);

			/* Interpret a zero LPI entry timer to mean
			 * immediate entry into LPI mode.
			 */
			if (et)
				value |= LPI_CTRL_STATUS_LPIATE;
		}

		if (en_tx_lpi_clockgating)
			value |= LPI_CTRL_STATUS_LPITCSE;
	}

	mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
	       LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;

	value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);

	return 0;
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= LPI_CTRL_STATUS_PLS;
	else
		value &= ~LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

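/* Write one entry of the extended VLAN filter through the indirect
 * MAC_VLAN_Tag_Data/offset interface and wait for the OB bit to clear.
 */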
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int ret;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
				 !(val & GMAC_VLAN_TAG_CTRL_OB),
				 1000, 500000);
	if (ret) {
		netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
		return -EBUSY;
	}

	return 0;
}

static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

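/* Program the RX packet filter: promiscuous, all-multicast, the multicast
 * hash table, the unicast perfect filters and the VLAN filter enable bit.
 */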
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				(hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
		value &= ~GMAC_PACKET_FILTER_VTFE;
	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

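/* Check and clear the per-queue MTL interrupt; reports an RX overflow event
 * for the given channel, if any.
 */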
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
				 struct mac_device_info *hw, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
							      chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Unused events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
			 struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    u16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6, the SA and DA filters cannot both be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa)
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	else
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp)
		value |= GMAC_L4PEN0;
	else
		value &= ~GMAC_L4PEN0;

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa)
		value = match & GMAC_L4SP0;
	else
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
			      struct dma_desc *rx_desc, struct sk_buff *skb)
{
	if (hw->desc->get_rx_vlan_valid(rx_desc)) {
		u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_VLAN_TAG);

	value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;

	if (hw->hw_vlan_en)
		/* Always strip VLAN on Receive */
		value |= GMAC_VLAN_TAG_STRIP_ALL;
	else
		/* Do not strip VLAN on Receive */
		value |= GMAC_VLAN_TAG_STRIP_NONE;

	/* Enable outer VLAN Tag in Rx DMA descriptor */
	value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
	writel(value, ioaddr + GMAC_VLAN_TAG);
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
	.rx_hw_vlan = dwmac4_rx_hw_vlan,
	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
	.rx_hw_vlan = dwmac4_rx_hw_vlan,
	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
	.rx_hw_vlan = dwmac4_rx_hw_vlan,
	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};

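/* Decode the NRVF field of MAC_HW_Feature3 into the number of extended VLAN
 * tag filter entries available in hardware.
 */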
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
	u32 val, num_vlan;

	val = readl(ioaddr + GMAC_HW_FEATURE3);
	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:
		num_vlan = 1;
		break;
	case 1:
		num_vlan = 4;
		break;
	case 2:
		num_vlan = 8;
		break;
	case 3:
		num_vlan = 16;
		break;
	case 4:
		num_vlan = 24;
		break;
	case 5:
		num_vlan = 32;
		break;
	default:
		num_vlan = 1;
	}

	return num_vlan;
}

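/* Fill in the mac_device_info callbacks and defaults (link capabilities,
 * MDIO register layout, filter sizes) shared by DWMAC4/5 cores.
 */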
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

	return 0;
}