xref: /linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision a0b0f6c7d7f29f1ade9ec59699d02e3b153ee8e4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 #include <linux/genalloc.h>
31 
32 #include "mtk_eth_soc.h"
33 #include "mtk_wed.h"
34 
35 static int mtk_msg_level = -1;
36 module_param_named(msg_level, mtk_msg_level, int, 0);
37 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
38 
39 #define MTK_ETHTOOL_STAT(x) { #x, \
40 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
41 
42 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
43 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
44 				  sizeof(u64) }
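/* For illustration: each entry pairs a counter name with that counter's index
 * into struct mtk_hw_stats viewed as an array of u64. MTK_ETHTOOL_STAT(tx_bytes)
 * expands to
 *	{ "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) },
 * which assumes the counters are laid out as consecutive u64 members so the
 * ethtool stats code can index them as ((u64 *)hw_stats)[offset].
 */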
45 
46 static const struct mtk_reg_map mtk_reg_map = {
47 	.tx_irq_mask		= 0x1a1c,
48 	.tx_irq_status		= 0x1a18,
49 	.pdma = {
50 		.rx_ptr		= 0x0900,
51 		.rx_cnt_cfg	= 0x0904,
52 		.pcrx_ptr	= 0x0908,
53 		.glo_cfg	= 0x0a04,
54 		.rst_idx	= 0x0a08,
55 		.delay_irq	= 0x0a0c,
56 		.irq_status	= 0x0a20,
57 		.irq_mask	= 0x0a28,
58 		.adma_rx_dbg0	= 0x0a38,
59 		.int_grp	= 0x0a50,
60 	},
61 	.qdma = {
62 		.qtx_cfg	= 0x1800,
63 		.qtx_sch	= 0x1804,
64 		.rx_ptr		= 0x1900,
65 		.rx_cnt_cfg	= 0x1904,
66 		.qcrx_ptr	= 0x1908,
67 		.glo_cfg	= 0x1a04,
68 		.rst_idx	= 0x1a08,
69 		.delay_irq	= 0x1a0c,
70 		.fc_th		= 0x1a10,
71 		.tx_sch_rate	= 0x1a14,
72 		.int_grp	= 0x1a20,
73 		.hred		= 0x1a44,
74 		.ctx_ptr	= 0x1b00,
75 		.dtx_ptr	= 0x1b04,
76 		.crx_ptr	= 0x1b10,
77 		.drx_ptr	= 0x1b14,
78 		.fq_head	= 0x1b20,
79 		.fq_tail	= 0x1b24,
80 		.fq_count	= 0x1b28,
81 		.fq_blen	= 0x1b2c,
82 	},
83 	.gdm1_cnt		= 0x2400,
84 	.gdma_to_ppe	= {
85 		[0]		= 0x4444,
86 	},
87 	.ppe_base		= 0x0c00,
88 	.wdma_base = {
89 		[0]		= 0x2800,
90 		[1]		= 0x2c00,
91 	},
92 	.pse_iq_sta		= 0x0110,
93 	.pse_oq_sta		= 0x0118,
94 };
95 
96 static const struct mtk_reg_map mt7628_reg_map = {
97 	.tx_irq_mask		= 0x0a28,
98 	.tx_irq_status		= 0x0a20,
99 	.pdma = {
100 		.rx_ptr		= 0x0900,
101 		.rx_cnt_cfg	= 0x0904,
102 		.pcrx_ptr	= 0x0908,
103 		.glo_cfg	= 0x0a04,
104 		.rst_idx	= 0x0a08,
105 		.delay_irq	= 0x0a0c,
106 		.irq_status	= 0x0a20,
107 		.irq_mask	= 0x0a28,
108 		.int_grp	= 0x0a50,
109 	},
110 };
111 
112 static const struct mtk_reg_map mt7986_reg_map = {
113 	.tx_irq_mask		= 0x461c,
114 	.tx_irq_status		= 0x4618,
115 	.pdma = {
116 		.rx_ptr		= 0x4100,
117 		.rx_cnt_cfg	= 0x4104,
118 		.pcrx_ptr	= 0x4108,
119 		.glo_cfg	= 0x4204,
120 		.rst_idx	= 0x4208,
121 		.delay_irq	= 0x420c,
122 		.irq_status	= 0x4220,
123 		.irq_mask	= 0x4228,
124 		.adma_rx_dbg0	= 0x4238,
125 		.int_grp	= 0x4250,
126 	},
127 	.qdma = {
128 		.qtx_cfg	= 0x4400,
129 		.qtx_sch	= 0x4404,
130 		.rx_ptr		= 0x4500,
131 		.rx_cnt_cfg	= 0x4504,
132 		.qcrx_ptr	= 0x4508,
133 		.glo_cfg	= 0x4604,
134 		.rst_idx	= 0x4608,
135 		.delay_irq	= 0x460c,
136 		.fc_th		= 0x4610,
137 		.int_grp	= 0x4620,
138 		.hred		= 0x4644,
139 		.ctx_ptr	= 0x4700,
140 		.dtx_ptr	= 0x4704,
141 		.crx_ptr	= 0x4710,
142 		.drx_ptr	= 0x4714,
143 		.fq_head	= 0x4720,
144 		.fq_tail	= 0x4724,
145 		.fq_count	= 0x4728,
146 		.fq_blen	= 0x472c,
147 		.tx_sch_rate	= 0x4798,
148 	},
149 	.gdm1_cnt		= 0x1c00,
150 	.gdma_to_ppe	= {
151 		[0]		= 0x3333,
152 		[1]		= 0x4444,
153 	},
154 	.ppe_base		= 0x2000,
155 	.wdma_base = {
156 		[0]		= 0x4800,
157 		[1]		= 0x4c00,
158 	},
159 	.pse_iq_sta		= 0x0180,
160 	.pse_oq_sta		= 0x01a0,
161 };
162 
163 static const struct mtk_reg_map mt7988_reg_map = {
164 	.tx_irq_mask		= 0x461c,
165 	.tx_irq_status		= 0x4618,
166 	.pdma = {
167 		.rx_ptr		= 0x6900,
168 		.rx_cnt_cfg	= 0x6904,
169 		.pcrx_ptr	= 0x6908,
170 		.glo_cfg	= 0x6a04,
171 		.rst_idx	= 0x6a08,
172 		.delay_irq	= 0x6a0c,
173 		.irq_status	= 0x6a20,
174 		.irq_mask	= 0x6a28,
175 		.adma_rx_dbg0	= 0x6a38,
176 		.int_grp	= 0x6a50,
177 	},
178 	.qdma = {
179 		.qtx_cfg	= 0x4400,
180 		.qtx_sch	= 0x4404,
181 		.rx_ptr		= 0x4500,
182 		.rx_cnt_cfg	= 0x4504,
183 		.qcrx_ptr	= 0x4508,
184 		.glo_cfg	= 0x4604,
185 		.rst_idx	= 0x4608,
186 		.delay_irq	= 0x460c,
187 		.fc_th		= 0x4610,
188 		.int_grp	= 0x4620,
189 		.hred		= 0x4644,
190 		.ctx_ptr	= 0x4700,
191 		.dtx_ptr	= 0x4704,
192 		.crx_ptr	= 0x4710,
193 		.drx_ptr	= 0x4714,
194 		.fq_head	= 0x4720,
195 		.fq_tail	= 0x4724,
196 		.fq_count	= 0x4728,
197 		.fq_blen	= 0x472c,
198 		.tx_sch_rate	= 0x4798,
199 	},
200 	.gdm1_cnt		= 0x1c00,
201 	.gdma_to_ppe	= {
202 		[0]		= 0x3333,
203 		[1]		= 0x4444,
204 		[2]		= 0xcccc,
205 	},
206 	.ppe_base		= 0x2000,
207 	.wdma_base = {
208 		[0]		= 0x4800,
209 		[1]		= 0x4c00,
210 		[2]		= 0x5000,
211 	},
212 	.pse_iq_sta		= 0x0180,
213 	.pse_oq_sta		= 0x01a0,
214 };
215 
216 /* strings used by ethtool */
217 static const struct mtk_ethtool_stats {
218 	char str[ETH_GSTRING_LEN];
219 	u32 offset;
220 } mtk_ethtool_stats[] = {
221 	MTK_ETHTOOL_STAT(tx_bytes),
222 	MTK_ETHTOOL_STAT(tx_packets),
223 	MTK_ETHTOOL_STAT(tx_skip),
224 	MTK_ETHTOOL_STAT(tx_collisions),
225 	MTK_ETHTOOL_STAT(rx_bytes),
226 	MTK_ETHTOOL_STAT(rx_packets),
227 	MTK_ETHTOOL_STAT(rx_overflow),
228 	MTK_ETHTOOL_STAT(rx_fcs_errors),
229 	MTK_ETHTOOL_STAT(rx_short_errors),
230 	MTK_ETHTOOL_STAT(rx_long_errors),
231 	MTK_ETHTOOL_STAT(rx_checksum_errors),
232 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
233 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
234 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
235 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
236 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
237 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
238 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
239 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
240 };
241 
242 static const char * const mtk_clks_source_name[] = {
243 	"ethif",
244 	"sgmiitop",
245 	"esw",
246 	"gp0",
247 	"gp1",
248 	"gp2",
249 	"gp3",
250 	"xgp1",
251 	"xgp2",
252 	"xgp3",
253 	"crypto",
254 	"fe",
255 	"trgpll",
256 	"sgmii_tx250m",
257 	"sgmii_rx250m",
258 	"sgmii_cdr_ref",
259 	"sgmii_cdr_fb",
260 	"sgmii2_tx250m",
261 	"sgmii2_rx250m",
262 	"sgmii2_cdr_ref",
263 	"sgmii2_cdr_fb",
264 	"sgmii_ck",
265 	"eth2pll",
266 	"wocpu0",
267 	"wocpu1",
268 	"netsys0",
269 	"netsys1",
270 	"ethwarp_wocpu2",
271 	"ethwarp_wocpu1",
272 	"ethwarp_wocpu0",
273 	"top_sgm0_sel",
274 	"top_sgm1_sel",
275 	"top_eth_gmii_sel",
276 	"top_eth_refck_50m_sel",
277 	"top_eth_sys_200m_sel",
278 	"top_eth_sys_sel",
279 	"top_eth_xgmii_sel",
280 	"top_eth_mii_sel",
281 	"top_netsys_sel",
282 	"top_netsys_500m_sel",
283 	"top_netsys_pao_2x_sel",
284 	"top_netsys_sync_250m_sel",
285 	"top_netsys_ppefb_250m_sel",
286 	"top_netsys_warp_sel",
287 };
288 
289 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
290 {
291 	__raw_writel(val, eth->base + reg);
292 }
293 
294 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
295 {
296 	return __raw_readl(eth->base + reg);
297 }
298 
299 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
300 {
301 	u32 val;
302 
303 	val = mtk_r32(eth, reg);
304 	val &= ~mask;
305 	val |= set;
306 	mtk_w32(eth, val, reg);
307 	return reg;
308 }
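/* Usage sketch: mtk_m32() is a read-modify-write helper; bits in @mask are
 * cleared and bits in @set are then OR-ed back in. For example, the call
 *	mtk_m32(eth, RXC_RST, 0, TRGMII_RCK_CTRL);
 * used later in this file clears only RXC_RST and leaves the other bits of
 * TRGMII_RCK_CTRL untouched.
 */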
309 
310 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
311 {
312 	unsigned long t_start = jiffies;
313 
314 	while (1) {
315 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
316 			return 0;
317 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
318 			break;
319 		cond_resched();
320 	}
321 
322 	dev_err(eth->dev, "mdio: MDIO timeout\n");
323 	return -ETIMEDOUT;
324 }
325 
326 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
327 			       u32 write_data)
328 {
329 	int ret;
330 
331 	ret = mtk_mdio_busy_wait(eth);
332 	if (ret < 0)
333 		return ret;
334 
335 	mtk_w32(eth, PHY_IAC_ACCESS |
336 		PHY_IAC_START_C22 |
337 		PHY_IAC_CMD_WRITE |
338 		PHY_IAC_REG(phy_reg) |
339 		PHY_IAC_ADDR(phy_addr) |
340 		PHY_IAC_DATA(write_data),
341 		MTK_PHY_IAC);
342 
343 	ret = mtk_mdio_busy_wait(eth);
344 	if (ret < 0)
345 		return ret;
346 
347 	return 0;
348 }
349 
350 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
351 			       u32 devad, u32 phy_reg, u32 write_data)
352 {
353 	int ret;
354 
355 	ret = mtk_mdio_busy_wait(eth);
356 	if (ret < 0)
357 		return ret;
358 
359 	mtk_w32(eth, PHY_IAC_ACCESS |
360 		PHY_IAC_START_C45 |
361 		PHY_IAC_CMD_C45_ADDR |
362 		PHY_IAC_REG(devad) |
363 		PHY_IAC_ADDR(phy_addr) |
364 		PHY_IAC_DATA(phy_reg),
365 		MTK_PHY_IAC);
366 
367 	ret = mtk_mdio_busy_wait(eth);
368 	if (ret < 0)
369 		return ret;
370 
371 	mtk_w32(eth, PHY_IAC_ACCESS |
372 		PHY_IAC_START_C45 |
373 		PHY_IAC_CMD_WRITE |
374 		PHY_IAC_REG(devad) |
375 		PHY_IAC_ADDR(phy_addr) |
376 		PHY_IAC_DATA(write_data),
377 		MTK_PHY_IAC);
378 
379 	ret = mtk_mdio_busy_wait(eth);
380 	if (ret < 0)
381 		return ret;
382 
383 	return 0;
384 }
385 
386 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
387 {
388 	int ret;
389 
390 	ret = mtk_mdio_busy_wait(eth);
391 	if (ret < 0)
392 		return ret;
393 
394 	mtk_w32(eth, PHY_IAC_ACCESS |
395 		PHY_IAC_START_C22 |
396 		PHY_IAC_CMD_C22_READ |
397 		PHY_IAC_REG(phy_reg) |
398 		PHY_IAC_ADDR(phy_addr),
399 		MTK_PHY_IAC);
400 
401 	ret = mtk_mdio_busy_wait(eth);
402 	if (ret < 0)
403 		return ret;
404 
405 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
406 }
407 
408 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
409 			      u32 devad, u32 phy_reg)
410 {
411 	int ret;
412 
413 	ret = mtk_mdio_busy_wait(eth);
414 	if (ret < 0)
415 		return ret;
416 
417 	mtk_w32(eth, PHY_IAC_ACCESS |
418 		PHY_IAC_START_C45 |
419 		PHY_IAC_CMD_C45_ADDR |
420 		PHY_IAC_REG(devad) |
421 		PHY_IAC_ADDR(phy_addr) |
422 		PHY_IAC_DATA(phy_reg),
423 		MTK_PHY_IAC);
424 
425 	ret = mtk_mdio_busy_wait(eth);
426 	if (ret < 0)
427 		return ret;
428 
429 	mtk_w32(eth, PHY_IAC_ACCESS |
430 		PHY_IAC_START_C45 |
431 		PHY_IAC_CMD_C45_READ |
432 		PHY_IAC_REG(devad) |
433 		PHY_IAC_ADDR(phy_addr),
434 		MTK_PHY_IAC);
435 
436 	ret = mtk_mdio_busy_wait(eth);
437 	if (ret < 0)
438 		return ret;
439 
440 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
441 }
442 
443 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
444 			      int phy_reg, u16 val)
445 {
446 	struct mtk_eth *eth = bus->priv;
447 
448 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
449 }
450 
451 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
452 			      int devad, int phy_reg, u16 val)
453 {
454 	struct mtk_eth *eth = bus->priv;
455 
456 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
457 }
458 
459 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
460 {
461 	struct mtk_eth *eth = bus->priv;
462 
463 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
464 }
465 
466 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
467 			     int phy_reg)
468 {
469 	struct mtk_eth *eth = bus->priv;
470 
471 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
472 }
473 
474 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
475 				     phy_interface_t interface)
476 {
477 	u32 val;
478 
479 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
480 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
481 
482 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
483 			   ETHSYS_TRGMII_MT7621_MASK, val);
484 
485 	return 0;
486 }
487 
488 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
489 				   phy_interface_t interface)
490 {
491 	int ret;
492 
493 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
494 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
495 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
496 		if (ret)
497 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
498 		return;
499 	}
500 
501 	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
502 }
503 
504 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
505 {
506 	/* Force Port1 XGMAC Link Up */
507 	mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
508 		MTK_XGMAC_STS(MTK_GMAC1_ID));
509 
510 	/* Adjust GSW bridge IPG to 11 */
511 	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
512 		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
513 		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
514 		MTK_GSW_CFG);
515 }
516 
517 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
518 					      phy_interface_t interface)
519 {
520 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
521 					   phylink_config);
522 	struct mtk_eth *eth = mac->hw;
523 	unsigned int sid;
524 
525 	if (interface == PHY_INTERFACE_MODE_SGMII ||
526 	    phy_interface_mode_is_8023z(interface)) {
527 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
528 		       0 : mac->id;
529 
530 		return eth->sgmii_pcs[sid];
531 	}
532 
533 	return NULL;
534 }
535 
536 static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
537 			   phy_interface_t iface)
538 {
539 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
540 					   phylink_config);
541 	struct mtk_eth *eth = mac->hw;
542 
543 	if (mtk_interface_mode_is_xgmii(eth, iface) &&
544 	    mac->id != MTK_GMAC1_ID) {
545 		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
546 			XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
547 
548 		mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
549 				 MTK_XGMAC_FORCE_LINK(mac->id),
550 			MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
551 	}
552 
553 	return 0;
554 }
555 
556 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
557 			   const struct phylink_link_state *state)
558 {
559 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
560 					   phylink_config);
561 	struct mtk_eth *eth = mac->hw;
562 	int val, ge_mode, err = 0;
563 	u32 i;
564 
565 	if (mac->interface != state->interface) {
566 		/* Setup soc pin functions */
567 		switch (state->interface) {
568 		case PHY_INTERFACE_MODE_TRGMII:
569 		case PHY_INTERFACE_MODE_RGMII_TXID:
570 		case PHY_INTERFACE_MODE_RGMII_RXID:
571 		case PHY_INTERFACE_MODE_RGMII_ID:
572 		case PHY_INTERFACE_MODE_RGMII:
573 		case PHY_INTERFACE_MODE_MII:
574 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
575 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
576 				if (err)
577 					goto init_err;
578 			}
579 			break;
580 		case PHY_INTERFACE_MODE_1000BASEX:
581 		case PHY_INTERFACE_MODE_2500BASEX:
582 		case PHY_INTERFACE_MODE_SGMII:
583 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
584 			if (err)
585 				goto init_err;
586 			break;
587 		case PHY_INTERFACE_MODE_GMII:
588 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
589 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
590 				if (err)
591 					goto init_err;
592 			}
593 			break;
594 		case PHY_INTERFACE_MODE_INTERNAL:
595 			if (mac->id == MTK_GMAC2_ID &&
596 			    MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
597 				err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
598 				if (err)
599 					goto init_err;
600 			}
601 			break;
602 		default:
603 			goto err_phy;
604 		}
605 
606 		/* Setup clock for 1st gmac */
607 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
608 		    !phy_interface_mode_is_8023z(state->interface) &&
609 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
610 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
611 					 MTK_TRGMII_MT7621_CLK)) {
612 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
613 							      state->interface))
614 					goto err_phy;
615 			} else {
616 				mtk_gmac0_rgmii_adjust(mac->hw,
617 						       state->interface);
618 
619 				/* mt7623_pad_clk_setup */
620 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
621 					mtk_w32(mac->hw,
622 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
623 						TRGMII_TD_ODT(i));
624 
625 				/* Assert/release MT7623 RXC reset */
626 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
627 					TRGMII_RCK_CTRL);
628 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
629 			}
630 		}
631 
632 		switch (state->interface) {
633 		case PHY_INTERFACE_MODE_MII:
634 		case PHY_INTERFACE_MODE_GMII:
635 			ge_mode = 1;
636 			break;
637 		default:
638 			ge_mode = 0;
639 			break;
640 		}
641 
642 		/* put the gmac into the right mode */
643 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
644 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
645 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
646 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
647 
648 		mac->interface = state->interface;
649 	}
650 
651 	/* SGMII */
652 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
653 	    phy_interface_mode_is_8023z(state->interface)) {
654 		/* The path from GMAC to SGMII will be enabled once the
655 		 * SGMIISYS setup is done.
656 		 */
657 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
658 
659 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
660 				   SYSCFG0_SGMII_MASK,
661 				   ~(u32)SYSCFG0_SGMII_MASK);
662 
663 		/* Save the syscfg0 value for mac_finish */
664 		mac->syscfg0 = val;
665 	} else if (phylink_autoneg_inband(mode)) {
666 		dev_err(eth->dev,
667 			"In-band mode not supported in non SGMII mode!\n");
668 		return;
669 	}
670 
671 	/* Setup gmac */
672 	if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
673 		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
674 		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
675 
676 		if (mac->id == MTK_GMAC1_ID)
677 			mtk_setup_bridge_switch(eth);
678 	}
679 
680 	return;
681 
682 err_phy:
683 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
684 		mac->id, phy_modes(state->interface));
685 	return;
686 
687 init_err:
688 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
689 		mac->id, phy_modes(state->interface), err);
690 }
691 
692 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
693 			  phy_interface_t interface)
694 {
695 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
696 					   phylink_config);
697 	struct mtk_eth *eth = mac->hw;
698 	u32 mcr_cur, mcr_new;
699 
700 	/* Enable SGMII */
701 	if (interface == PHY_INTERFACE_MODE_SGMII ||
702 	    phy_interface_mode_is_8023z(interface))
703 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
704 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
705 
706 	/* Setup gmac */
707 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
708 	mcr_new = mcr_cur;
709 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
710 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
711 
712 	/* Only update control register when needed! */
713 	if (mcr_new != mcr_cur)
714 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
715 
716 	return 0;
717 }
718 
719 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
720 			      phy_interface_t interface)
721 {
722 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
723 					   phylink_config);
724 
725 	if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
726 		/* GMAC modes */
727 		mtk_m32(mac->hw,
728 			MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
729 			MTK_MAC_MCR(mac->id));
730 	} else if (mac->id != MTK_GMAC1_ID) {
731 		/* XGMAC except for built-in switch */
732 		mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
733 			MTK_XMAC_MCR(mac->id));
734 		mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
735 			MTK_XGMAC_STS(mac->id));
736 	}
737 }
738 
739 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
740 				int speed)
741 {
742 	const struct mtk_soc_data *soc = eth->soc;
743 	u32 ofs, val;
744 
745 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
746 		return;
747 
748 	val = MTK_QTX_SCH_MIN_RATE_EN |
749 	      /* minimum: 10 Mbps */
750 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
751 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
752 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
753 	if (mtk_is_netsys_v1(eth))
754 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
755 
756 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
757 		switch (speed) {
758 		case SPEED_10:
759 			val |= MTK_QTX_SCH_MAX_RATE_EN |
760 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
761 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
762 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
763 			break;
764 		case SPEED_100:
765 			val |= MTK_QTX_SCH_MAX_RATE_EN |
766 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
767 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
768 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
769 			break;
770 		case SPEED_1000:
771 			val |= MTK_QTX_SCH_MAX_RATE_EN |
772 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
773 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
774 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
775 			break;
776 		default:
777 			break;
778 		}
779 	} else {
780 		switch (speed) {
781 		case SPEED_10:
782 			val |= MTK_QTX_SCH_MAX_RATE_EN |
783 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
784 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
785 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
786 			break;
787 		case SPEED_100:
788 			val |= MTK_QTX_SCH_MAX_RATE_EN |
789 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
790 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
791 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
792 			break;
793 		case SPEED_1000:
794 			val |= MTK_QTX_SCH_MAX_RATE_EN |
795 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
796 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
797 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
798 			break;
799 		default:
800 			break;
801 		}
802 	}
803 
804 	ofs = MTK_QTX_OFFSET * idx;
805 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
806 }
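/* Rate-field encoding (an inference from the values above, not documented
 * here): the MIN/MAX rate appears to be mantissa * 10^exponent in kbit/s, so
 * MAN=1/EXP=6 is roughly 1 Gbit/s and the MAN=1/EXP=4 minimum matches the
 * "minimum: 10 Mbps" comment. The MT7621 mantissas (103/105) look like
 * SoC-specific tuning rather than exact decimal rates.
 */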
807 
808 static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
809 				struct phy_device *phy,
810 				unsigned int mode, phy_interface_t interface,
811 				int speed, int duplex, bool tx_pause,
812 				bool rx_pause)
813 {
814 	u32 mcr;
815 
816 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
817 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
818 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
819 		 MAC_MCR_FORCE_RX_FC);
820 
821 	/* Configure speed */
822 	mac->speed = speed;
823 	switch (speed) {
824 	case SPEED_2500:
825 	case SPEED_1000:
826 		mcr |= MAC_MCR_SPEED_1000;
827 		break;
828 	case SPEED_100:
829 		mcr |= MAC_MCR_SPEED_100;
830 		break;
831 	}
832 
833 	/* Configure duplex */
834 	if (duplex == DUPLEX_FULL)
835 		mcr |= MAC_MCR_FORCE_DPX;
836 
837 	/* Configure pause modes - phylink will avoid these for half duplex */
838 	if (tx_pause)
839 		mcr |= MAC_MCR_FORCE_TX_FC;
840 	if (rx_pause)
841 		mcr |= MAC_MCR_FORCE_RX_FC;
842 
843 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
844 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
845 }
846 
847 static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
848 				 struct phy_device *phy,
849 				 unsigned int mode, phy_interface_t interface,
850 				 int speed, int duplex, bool tx_pause,
851 				 bool rx_pause)
852 {
853 	u32 mcr;
854 
855 	if (mac->id == MTK_GMAC1_ID)
856 		return;
857 
858 	/* Eliminate the interference (before link-up) caused by PHY noise */
859 	mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
860 	mdelay(20);
861 	mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
862 		MTK_XMAC_CNT_CTRL(mac->id));
863 
864 	mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
865 		MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
866 
867 	mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
868 	mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
869 		 XMAC_MCR_TRX_DISABLE);
870 	/* Configure pause modes -
871 	 * phylink will avoid these for half duplex
872 	 */
873 	if (tx_pause)
874 		mcr |= XMAC_MCR_FORCE_TX_FC;
875 	if (rx_pause)
876 		mcr |= XMAC_MCR_FORCE_RX_FC;
877 
878 	mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
879 }
880 
881 static void mtk_mac_link_up(struct phylink_config *config,
882 			    struct phy_device *phy,
883 			    unsigned int mode, phy_interface_t interface,
884 			    int speed, int duplex, bool tx_pause, bool rx_pause)
885 {
886 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
887 					   phylink_config);
888 
889 	if (mtk_interface_mode_is_xgmii(mac->hw, interface))
890 		mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
891 				     tx_pause, rx_pause);
892 	else
893 		mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
894 				    tx_pause, rx_pause);
895 }
896 
897 static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
898 {
899 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
900 					   phylink_config);
901 	struct mtk_eth *eth = mac->hw;
902 
903 	mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
904 }
905 
906 static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
907 				 bool tx_clk_stop)
908 {
909 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
910 					   phylink_config);
911 	struct mtk_eth *eth = mac->hw;
912 	u32 val;
913 
914 	if (mtk_interface_mode_is_xgmii(eth, mac->interface))
915 		return -EOPNOTSUPP;
916 
917 	/* Tx idle timer in ms */
918 	timer = DIV_ROUND_UP(timer, 1000);
919 
920 	/* If the timer is zero, then set LPI_MODE, which allows the
921 	 * system to enter LPI mode immediately rather than waiting for
922 	 * the LPI threshold.
923 	 */
924 	if (!timer)
925 		val = MAC_EEE_LPI_MODE;
926 	else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
927 		val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
928 	else
929 		val = MAC_EEE_LPI_TXIDLE_THD;
930 
931 	if (tx_clk_stop)
932 		val |= MAC_EEE_CKG_TXIDLE;
933 
934 	/* PHY Wake-up time, this field does not have a reset value, so use the
935 	 * reset value from MT7531 (36us for 100M and 17us for 1000M).
936 	 */
937 	val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
938 	       FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
939 
940 	mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
941 	mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
942 
943 	return 0;
944 }
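/* Threshold selection summary (the microsecond unit of @timer is an assumption
 * based on the DIV_ROUND_UP(timer, 1000) conversion to ms): a zero timer
 * requests immediate LPI entry via MAC_EEE_LPI_MODE, a timer that fits in
 * MAC_EEE_LPI_TXIDLE_THD becomes the TX idle threshold, and larger values are
 * clamped to the field's maximum.
 */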
945 
946 static const struct phylink_mac_ops mtk_phylink_ops = {
947 	.mac_prepare = mtk_mac_prepare,
948 	.mac_select_pcs = mtk_mac_select_pcs,
949 	.mac_config = mtk_mac_config,
950 	.mac_finish = mtk_mac_finish,
951 	.mac_link_down = mtk_mac_link_down,
952 	.mac_link_up = mtk_mac_link_up,
953 	.mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
954 	.mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
955 };
956 
957 static void rt5350_mac_config(struct phylink_config *config, unsigned int mode,
958 				const struct phylink_link_state *state)
959 {
960 }
961 
962 static void rt5350_mac_link_down(struct phylink_config *config, unsigned int mode,
963 				phy_interface_t interface)
964 {
965 }
966 
967 static void rt5350_mac_link_up(struct phylink_config *config,
968 			    struct phy_device *phy,
969 			    unsigned int mode, phy_interface_t interface,
970 			    int speed, int duplex, bool tx_pause, bool rx_pause)
971 {
972 }
973 
974 /* MT76x8 (rt5350-eth) does not expose any MAC control registers */
975 static const struct phylink_mac_ops rt5350_phylink_ops = {
976 	.mac_config = rt5350_mac_config,
977 	.mac_link_down = rt5350_mac_link_down,
978 	.mac_link_up = rt5350_mac_link_up,
979 };
980 
981 static void mtk_mdio_config(struct mtk_eth *eth)
982 {
983 	u32 val;
984 
985 	/* Configure MDC Divider */
986 	val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
987 
988 	/* Configure MDC Turbo Mode */
989 	if (mtk_is_netsys_v3_or_greater(eth))
990 		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
991 	else
992 		val |= PPSC_MDC_TURBO;
993 
994 	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
995 }
996 
997 static int mtk_mdio_init(struct mtk_eth *eth)
998 {
999 	unsigned int max_clk = 2500000;
1000 	struct device_node *mii_np;
1001 	int ret;
1002 	u32 val;
1003 
1004 	mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
1005 	if (!mii_np) {
1006 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
1007 		return -ENODEV;
1008 	}
1009 
1010 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
1011 	if (!eth->mii_bus) {
1012 		ret = -ENOMEM;
1013 		goto err_put_node;
1014 	}
1015 
1016 	eth->mii_bus->name = "mdio";
1017 	eth->mii_bus->read = mtk_mdio_read_c22;
1018 	eth->mii_bus->write = mtk_mdio_write_c22;
1019 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
1020 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
1021 	eth->mii_bus->priv = eth;
1022 	eth->mii_bus->parent = eth->dev;
1023 
1024 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
1025 
1026 	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
1027 		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
1028 			dev_err(eth->dev, "MDIO clock frequency out of range");
1029 			ret = -EINVAL;
1030 			goto err_put_node;
1031 		}
1032 		max_clk = val;
1033 	}
1034 	eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
1035 	mtk_mdio_config(eth);
1036 	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
1037 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
1038 
1039 err_put_node:
1040 	of_node_put(mii_np);
1041 	return ret;
1042 }
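/* Worked example (MDC_MAX_FREQ is not shown here and is treated simply as the
 * maximum supported MDC clock): with "clock-frequency" = 2500000 in the
 * mdio-bus node,
 *	mdc_divider = min(DIV_ROUND_UP(MDC_MAX_FREQ, 2500000), 63);
 * i.e. the divider is rounded up so MDC never exceeds the requested rate, and
 * the clamp to 63 suggests PPSC_MDC_CFG holds a 6-bit divider (assumption).
 */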
1043 
1044 static void mtk_mdio_cleanup(struct mtk_eth *eth)
1045 {
1046 	if (!eth->mii_bus)
1047 		return;
1048 
1049 	mdiobus_unregister(eth->mii_bus);
1050 }
1051 
1052 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
1053 {
1054 	unsigned long flags;
1055 	u32 val;
1056 
1057 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
1058 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1059 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
1060 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1061 }
1062 
1063 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
1064 {
1065 	unsigned long flags;
1066 	u32 val;
1067 
1068 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
1069 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
1070 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
1071 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
1072 }
1073 
1074 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
1075 {
1076 	unsigned long flags;
1077 	u32 val;
1078 
1079 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
1080 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1081 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
1082 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1083 }
1084 
1085 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
1086 {
1087 	unsigned long flags;
1088 	u32 val;
1089 
1090 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
1091 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
1092 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
1093 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
1094 }
1095 
1096 static int mtk_set_mac_address(struct net_device *dev, void *p)
1097 {
1098 	int ret = eth_mac_addr(dev, p);
1099 	struct mtk_mac *mac = netdev_priv(dev);
1100 	struct mtk_eth *eth = mac->hw;
1101 	const char *macaddr = dev->dev_addr;
1102 
1103 	if (ret)
1104 		return ret;
1105 
1106 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
1107 		return -EBUSY;
1108 
1109 	spin_lock_bh(&mac->hw->page_lock);
1110 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1111 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1112 			MT7628_SDM_MAC_ADRH);
1113 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1114 			(macaddr[4] << 8) | macaddr[5],
1115 			MT7628_SDM_MAC_ADRL);
1116 	} else {
1117 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
1118 			MTK_GDMA_MAC_ADRH(mac->id));
1119 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
1120 			(macaddr[4] << 8) | macaddr[5],
1121 			MTK_GDMA_MAC_ADRL(mac->id));
1122 	}
1123 	spin_unlock_bh(&mac->hw->page_lock);
1124 
1125 	return 0;
1126 }
1127 
1128 void mtk_stats_update_mac(struct mtk_mac *mac)
1129 {
1130 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1131 	struct mtk_eth *eth = mac->hw;
1132 
1133 	u64_stats_update_begin(&hw_stats->syncp);
1134 
1135 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1136 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
1137 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
1138 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
1139 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
1140 		hw_stats->rx_checksum_errors +=
1141 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
1142 	} else {
1143 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1144 		unsigned int offs = hw_stats->reg_offset;
1145 		u64 stats;
1146 
1147 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
1148 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
1149 		if (stats)
1150 			hw_stats->rx_bytes += (stats << 32);
1151 		hw_stats->rx_packets +=
1152 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
1153 		hw_stats->rx_overflow +=
1154 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
1155 		hw_stats->rx_fcs_errors +=
1156 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
1157 		hw_stats->rx_short_errors +=
1158 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
1159 		hw_stats->rx_long_errors +=
1160 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
1161 		hw_stats->rx_checksum_errors +=
1162 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
1163 		hw_stats->rx_flow_control_packets +=
1164 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1165 
1166 		if (mtk_is_netsys_v3_or_greater(eth)) {
1167 			hw_stats->tx_skip +=
1168 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1169 			hw_stats->tx_collisions +=
1170 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1171 			hw_stats->tx_bytes +=
1172 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1173 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1174 			if (stats)
1175 				hw_stats->tx_bytes += (stats << 32);
1176 			hw_stats->tx_packets +=
1177 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1178 		} else {
1179 			hw_stats->tx_skip +=
1180 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1181 			hw_stats->tx_collisions +=
1182 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1183 			hw_stats->tx_bytes +=
1184 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1185 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1186 			if (stats)
1187 				hw_stats->tx_bytes += (stats << 32);
1188 			hw_stats->tx_packets +=
1189 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1190 		}
1191 	}
1192 
1193 	u64_stats_update_end(&hw_stats->syncp);
1194 }
1195 
1196 static void mtk_stats_update(struct mtk_eth *eth)
1197 {
1198 	int i;
1199 
1200 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1201 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1202 			continue;
1203 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1204 			mtk_stats_update_mac(eth->mac[i]);
1205 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1206 		}
1207 	}
1208 }
1209 
1210 static void mtk_get_stats64(struct net_device *dev,
1211 			    struct rtnl_link_stats64 *storage)
1212 {
1213 	struct mtk_mac *mac = netdev_priv(dev);
1214 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1215 	unsigned int start;
1216 
1217 	if (netif_running(dev) && netif_device_present(dev)) {
1218 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
1219 			mtk_stats_update_mac(mac);
1220 			spin_unlock_bh(&hw_stats->stats_lock);
1221 		}
1222 	}
1223 
1224 	do {
1225 		start = u64_stats_fetch_begin(&hw_stats->syncp);
1226 		storage->rx_packets = hw_stats->rx_packets;
1227 		storage->tx_packets = hw_stats->tx_packets;
1228 		storage->rx_bytes = hw_stats->rx_bytes;
1229 		storage->tx_bytes = hw_stats->tx_bytes;
1230 		storage->collisions = hw_stats->tx_collisions;
1231 		storage->rx_length_errors = hw_stats->rx_short_errors +
1232 			hw_stats->rx_long_errors;
1233 		storage->rx_over_errors = hw_stats->rx_overflow;
1234 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1235 		storage->rx_errors = hw_stats->rx_checksum_errors;
1236 		storage->tx_aborted_errors = hw_stats->tx_skip;
1237 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1238 
1239 	storage->tx_errors = dev->stats.tx_errors;
1240 	storage->rx_dropped = dev->stats.rx_dropped;
1241 	storage->tx_dropped = dev->stats.tx_dropped;
1242 }
1243 
1244 static inline int mtk_max_frag_size(int mtu)
1245 {
1246 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1247 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1248 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1249 
1250 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1251 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1252 }
1253 
1254 static inline int mtk_max_buf_size(int frag_size)
1255 {
1256 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1257 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1258 
1259 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1260 
1261 	return buf_size;
1262 }
1263 
1264 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1265 			    struct mtk_rx_dma_v2 *dma_rxd)
1266 {
1267 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1268 	if (!(rxd->rxd2 & RX_DMA_DONE))
1269 		return false;
1270 
1271 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1272 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1273 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1274 	if (mtk_is_netsys_v3_or_greater(eth)) {
1275 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1276 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1277 	}
1278 
1279 	return true;
1280 }
1281 
1282 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1283 {
1284 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1285 	unsigned long data;
1286 
1287 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1288 				get_order(size));
1289 
1290 	return (void *)data;
1291 }
1292 
1293 static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
1294 				dma_addr_t *dma_handle, bool use_sram)
1295 {
1296 	void *dma_ring;
1297 
1298 	if (use_sram && eth->sram_pool) {
1299 		dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
1300 		if (!dma_ring)
1301 			return dma_ring;
1302 		*dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
1303 						    (unsigned long)dma_ring);
1304 	} else {
1305 		dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
1306 					      GFP_KERNEL);
1307 	}
1308 
1309 	return dma_ring;
1310 }
1311 
1312 static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
1313 			      dma_addr_t dma_handle, bool in_sram)
1314 {
1315 	if (in_sram && eth->sram_pool)
1316 		gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
1317 	else
1318 		dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
1319 }
1320 
1321 /* the qdma core needs scratch memory to be setup */
1322 static int mtk_init_fq_dma(struct mtk_eth *eth)
1323 {
1324 	const struct mtk_soc_data *soc = eth->soc;
1325 	dma_addr_t phy_ring_tail;
1326 	int cnt = soc->tx.fq_dma_size;
1327 	dma_addr_t dma_addr;
1328 	int i, j, len;
1329 
1330 	eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
1331 					       &eth->phy_scratch_ring, true);
1332 
1333 	if (unlikely(!eth->scratch_ring))
1334 		return -ENOMEM;
1335 
1336 	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1337 
1338 	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1339 		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1340 		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1341 
1342 		if (unlikely(!eth->scratch_head[j]))
1343 			return -ENOMEM;
1344 
1345 		dma_addr = dma_map_single(eth->dma_dev,
1346 					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1347 					  DMA_FROM_DEVICE);
1348 
1349 		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1350 			return -ENOMEM;
1351 
1352 		for (i = 0; i < len; i++) {
1353 			struct mtk_tx_dma_v2 *txd;
1354 
1355 			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1356 			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1357 			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1358 				txd->txd2 = eth->phy_scratch_ring +
1359 					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1360 
1361 			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1362 			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1363 				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1364 
1365 			txd->txd4 = 0;
1366 			if (mtk_is_netsys_v2_or_greater(eth)) {
1367 				txd->txd5 = 0;
1368 				txd->txd6 = 0;
1369 				txd->txd7 = 0;
1370 				txd->txd8 = 0;
1371 			}
1372 		}
1373 	}
1374 
1375 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1376 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1377 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1378 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1379 
1380 	return 0;
1381 }
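/* Layout note (inferred from the loop above): the "free queue" scratch ring is
 * a linked chain of TX descriptors in which txd1 holds the DMA address of a
 * MTK_QDMA_PAGE_SIZE scratch buffer and txd2 holds the physical address of the
 * next descriptor; fq_head, fq_tail, fq_count and fq_blen then tell the QDMA
 * engine where the chain lives so it can dequeue scratch buffers on its own.
 */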
1382 
1383 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1384 {
1385 	return ring->dma + (desc - ring->phys);
1386 }
1387 
1388 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1389 					     void *txd, u32 txd_size)
1390 {
1391 	int idx = (txd - ring->dma) / txd_size;
1392 
1393 	return &ring->buf[idx];
1394 }
1395 
1396 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1397 				       struct mtk_tx_dma *dma)
1398 {
1399 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1400 }
1401 
1402 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1403 {
1404 	return (dma - ring->dma) / txd_size;
1405 }
1406 
1407 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1408 			 struct xdp_frame_bulk *bq, bool napi)
1409 {
1410 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1411 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1412 			dma_unmap_single(eth->dma_dev,
1413 					 dma_unmap_addr(tx_buf, dma_addr0),
1414 					 dma_unmap_len(tx_buf, dma_len0),
1415 					 DMA_TO_DEVICE);
1416 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1417 			dma_unmap_page(eth->dma_dev,
1418 				       dma_unmap_addr(tx_buf, dma_addr0),
1419 				       dma_unmap_len(tx_buf, dma_len0),
1420 				       DMA_TO_DEVICE);
1421 		}
1422 	} else {
1423 		if (dma_unmap_len(tx_buf, dma_len0)) {
1424 			dma_unmap_page(eth->dma_dev,
1425 				       dma_unmap_addr(tx_buf, dma_addr0),
1426 				       dma_unmap_len(tx_buf, dma_len0),
1427 				       DMA_TO_DEVICE);
1428 		}
1429 
1430 		if (dma_unmap_len(tx_buf, dma_len1)) {
1431 			dma_unmap_page(eth->dma_dev,
1432 				       dma_unmap_addr(tx_buf, dma_addr1),
1433 				       dma_unmap_len(tx_buf, dma_len1),
1434 				       DMA_TO_DEVICE);
1435 		}
1436 	}
1437 
1438 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1439 		if (tx_buf->type == MTK_TYPE_SKB) {
1440 			struct sk_buff *skb = tx_buf->data;
1441 
1442 			if (napi)
1443 				napi_consume_skb(skb, napi);
1444 			else
1445 				dev_kfree_skb_any(skb);
1446 		} else {
1447 			struct xdp_frame *xdpf = tx_buf->data;
1448 
1449 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1450 				xdp_return_frame_rx_napi(xdpf);
1451 			else if (bq)
1452 				xdp_return_frame_bulk(xdpf, bq);
1453 			else
1454 				xdp_return_frame(xdpf);
1455 		}
1456 	}
1457 	tx_buf->flags = 0;
1458 	tx_buf->data = NULL;
1459 }
1460 
1461 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1462 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1463 			 size_t size, int idx)
1464 {
1465 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1466 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1467 		dma_unmap_len_set(tx_buf, dma_len0, size);
1468 	} else {
1469 		if (idx & 1) {
1470 			txd->txd3 = mapped_addr;
1471 			txd->txd2 |= TX_DMA_PLEN1(size);
1472 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1473 			dma_unmap_len_set(tx_buf, dma_len1, size);
1474 		} else {
1475 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1476 			txd->txd1 = mapped_addr;
1477 			txd->txd2 = TX_DMA_PLEN0(size);
1478 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1479 			dma_unmap_len_set(tx_buf, dma_len0, size);
1480 		}
1481 	}
1482 }
1483 
1484 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1485 				   struct mtk_tx_dma_desc_info *info)
1486 {
1487 	struct mtk_mac *mac = netdev_priv(dev);
1488 	struct mtk_eth *eth = mac->hw;
1489 	struct mtk_tx_dma *desc = txd;
1490 	u32 data;
1491 
1492 	WRITE_ONCE(desc->txd1, info->addr);
1493 
1494 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1495 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1496 	if (info->last)
1497 		data |= TX_DMA_LS0;
1498 	WRITE_ONCE(desc->txd3, data);
1499 
1500 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1501 	if (info->first) {
1502 		if (info->gso)
1503 			data |= TX_DMA_TSO;
1504 		/* tx checksum offload */
1505 		if (info->csum)
1506 			data |= TX_DMA_CHKSUM;
1507 		/* vlan header offload */
1508 		if (info->vlan)
1509 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1510 	}
1511 	WRITE_ONCE(desc->txd4, data);
1512 }
1513 
1514 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1515 				   struct mtk_tx_dma_desc_info *info)
1516 {
1517 	struct mtk_mac *mac = netdev_priv(dev);
1518 	struct mtk_tx_dma_v2 *desc = txd;
1519 	struct mtk_eth *eth = mac->hw;
1520 	u32 data;
1521 
1522 	WRITE_ONCE(desc->txd1, info->addr);
1523 
1524 	data = TX_DMA_PLEN0(info->size);
1525 	if (info->last)
1526 		data |= TX_DMA_LS0;
1527 
1528 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1529 		data |= TX_DMA_PREP_ADDR64(info->addr);
1530 
1531 	WRITE_ONCE(desc->txd3, data);
1532 
1533 	 /* set forward port */
1534 	switch (mac->id) {
1535 	case MTK_GMAC1_ID:
1536 		data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1537 		break;
1538 	case MTK_GMAC2_ID:
1539 		data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1540 		break;
1541 	case MTK_GMAC3_ID:
1542 		data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1543 		break;
1544 	}
1545 
1546 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1547 	WRITE_ONCE(desc->txd4, data);
1548 
1549 	data = 0;
1550 	if (info->first) {
1551 		if (info->gso)
1552 			data |= TX_DMA_TSO_V2;
1553 		/* tx checksum offload */
1554 		if (info->csum)
1555 			data |= TX_DMA_CHKSUM_V2;
1556 		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1557 			data |= TX_DMA_SPTAG_V3;
1558 	}
1559 	WRITE_ONCE(desc->txd5, data);
1560 
1561 	data = 0;
1562 	if (info->first && info->vlan)
1563 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1564 	WRITE_ONCE(desc->txd6, data);
1565 
1566 	WRITE_ONCE(desc->txd7, 0);
1567 	WRITE_ONCE(desc->txd8, 0);
1568 }
1569 
1570 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1571 				struct mtk_tx_dma_desc_info *info)
1572 {
1573 	struct mtk_mac *mac = netdev_priv(dev);
1574 	struct mtk_eth *eth = mac->hw;
1575 
1576 	if (mtk_is_netsys_v2_or_greater(eth))
1577 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1578 	else
1579 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1580 }
1581 
1582 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1583 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1584 {
1585 	struct mtk_tx_dma_desc_info txd_info = {
1586 		.size = skb_headlen(skb),
1587 		.gso = gso,
1588 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1589 		.vlan = skb_vlan_tag_present(skb),
1590 		.qid = skb_get_queue_mapping(skb),
1591 		.vlan_tci = skb_vlan_tag_get(skb),
1592 		.first = true,
1593 		.last = !skb_is_nonlinear(skb),
1594 	};
1595 	struct netdev_queue *txq;
1596 	struct mtk_mac *mac = netdev_priv(dev);
1597 	struct mtk_eth *eth = mac->hw;
1598 	const struct mtk_soc_data *soc = eth->soc;
1599 	struct mtk_tx_dma *itxd, *txd;
1600 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1601 	struct mtk_tx_buf *itx_buf, *tx_buf;
1602 	int i, n_desc = 1;
1603 	int queue = skb_get_queue_mapping(skb);
1604 	int k = 0;
1605 
1606 	txq = netdev_get_tx_queue(dev, queue);
1607 	itxd = ring->next_free;
1608 	itxd_pdma = qdma_to_pdma(ring, itxd);
1609 	if (itxd == ring->last_free)
1610 		return -ENOMEM;
1611 
1612 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1613 	memset(itx_buf, 0, sizeof(*itx_buf));
1614 
1615 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1616 				       DMA_TO_DEVICE);
1617 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1618 		return -ENOMEM;
1619 
1620 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1621 
1622 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1623 	itx_buf->mac_id = mac->id;
1624 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1625 		     k++);
1626 
1627 	/* TX SG offload */
1628 	txd = itxd;
1629 	txd_pdma = qdma_to_pdma(ring, txd);
1630 
1631 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1632 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1633 		unsigned int offset = 0;
1634 		int frag_size = skb_frag_size(frag);
1635 
1636 		while (frag_size) {
1637 			bool new_desc = true;
1638 
1639 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1640 			    (i & 0x1)) {
1641 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1642 				txd_pdma = qdma_to_pdma(ring, txd);
1643 				if (txd == ring->last_free)
1644 					goto err_dma;
1645 
1646 				n_desc++;
1647 			} else {
1648 				new_desc = false;
1649 			}
1650 
1651 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1652 			txd_info.size = min_t(unsigned int, frag_size,
1653 					      soc->tx.dma_max_len);
1654 			txd_info.qid = queue;
1655 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1656 					!(frag_size - txd_info.size);
1657 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1658 							 offset, txd_info.size,
1659 							 DMA_TO_DEVICE);
1660 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1661 				goto err_dma;
1662 
1663 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1664 
1665 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1666 						    soc->tx.desc_size);
1667 			if (new_desc)
1668 				memset(tx_buf, 0, sizeof(*tx_buf));
1669 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1670 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1671 			tx_buf->mac_id = mac->id;
1672 
1673 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1674 				     txd_info.size, k++);
1675 
1676 			frag_size -= txd_info.size;
1677 			offset += txd_info.size;
1678 		}
1679 	}
1680 
1681 	/* store skb to cleanup */
1682 	itx_buf->type = MTK_TYPE_SKB;
1683 	itx_buf->data = skb;
1684 
1685 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1686 		if (k & 0x1)
1687 			txd_pdma->txd2 |= TX_DMA_LS0;
1688 		else
1689 			txd_pdma->txd2 |= TX_DMA_LS1;
1690 	}
1691 
1692 	netdev_tx_sent_queue(txq, skb->len);
1693 	skb_tx_timestamp(skb);
1694 
1695 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1696 	atomic_sub(n_desc, &ring->free_count);
1697 
1698 	/* make sure that all changes to the dma ring are flushed before we
1699 	 * continue
1700 	 */
1701 	wmb();
1702 
1703 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1704 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1705 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1706 	} else {
1707 		int next_idx;
1708 
1709 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1710 					 ring->dma_size);
1711 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1712 	}
1713 
1714 	return 0;
1715 
1716 err_dma:
1717 	do {
1718 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1719 
1720 		/* unmap dma */
1721 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1722 
1723 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1724 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1725 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1726 
1727 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1728 		itxd_pdma = qdma_to_pdma(ring, itxd);
1729 	} while (itxd != txd);
1730 
1731 	return -ENOMEM;
1732 }
1733 
1734 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1735 {
1736 	int i, nfrags = 1;
1737 	skb_frag_t *frag;
1738 
1739 	if (skb_is_gso(skb)) {
1740 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1741 			frag = &skb_shinfo(skb)->frags[i];
1742 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1743 					       eth->soc->tx.dma_max_len);
1744 		}
1745 	} else {
1746 		nfrags += skb_shinfo(skb)->nr_frags;
1747 	}
1748 
1749 	return nfrags;
1750 }
1751 
1752 static int mtk_queue_stopped(struct mtk_eth *eth)
1753 {
1754 	int i;
1755 
1756 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1757 		if (!eth->netdev[i])
1758 			continue;
1759 		if (netif_queue_stopped(eth->netdev[i]))
1760 			return 1;
1761 	}
1762 
1763 	return 0;
1764 }
1765 
1766 static void mtk_wake_queue(struct mtk_eth *eth)
1767 {
1768 	int i;
1769 
1770 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1771 		if (!eth->netdev[i])
1772 			continue;
1773 		netif_tx_wake_all_queues(eth->netdev[i]);
1774 	}
1775 }
1776 
1777 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1778 {
1779 	struct mtk_mac *mac = netdev_priv(dev);
1780 	struct mtk_eth *eth = mac->hw;
1781 	struct mtk_tx_ring *ring = &eth->tx_ring;
1782 	struct net_device_stats *stats = &dev->stats;
1783 	bool gso = false;
1784 	int tx_num;
1785 
1786 	if (skb_vlan_tag_present(skb) &&
1787 	    !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
1788 		skb = __vlan_hwaccel_push_inside(skb);
1789 		if (!skb)
1790 			goto dropped;
1791 	}
1792 
1793 	/* normally we can rely on the stack not calling this more than once,
1794 	 * however we have 2 queues running on the same ring so we need to lock
1795 	 * the ring access
1796 	 */
1797 	spin_lock(&eth->page_lock);
1798 
1799 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1800 		goto drop;
1801 
1802 	tx_num = mtk_cal_txd_req(eth, skb);
1803 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1804 		netif_tx_stop_all_queues(dev);
1805 		netif_err(eth, tx_queued, dev,
1806 			  "Tx Ring full when queue awake!\n");
1807 		spin_unlock(&eth->page_lock);
1808 		return NETDEV_TX_BUSY;
1809 	}
1810 
1811 	/* TSO: fill MSS info in tcp checksum field */
1812 	if (skb_is_gso(skb)) {
1813 		if (skb_cow_head(skb, 0)) {
1814 			netif_warn(eth, tx_err, dev,
1815 				   "GSO expand head fail.\n");
1816 			goto drop;
1817 		}
1818 
1819 		if (skb_shinfo(skb)->gso_type &
1820 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1821 			gso = true;
1822 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1823 		}
1824 	}
1825 
1826 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1827 		goto drop;
1828 
1829 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1830 		netif_tx_stop_all_queues(dev);
1831 
1832 	spin_unlock(&eth->page_lock);
1833 
1834 	return NETDEV_TX_OK;
1835 
1836 drop:
1837 	spin_unlock(&eth->page_lock);
1838 	dev_kfree_skb_any(skb);
1839 dropped:
1840 	stats->tx_dropped++;
1841 	return NETDEV_TX_OK;
1842 }
1843 
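/* Pick the RX ring to service. Without HW LRO only ring 0 is used; with
 * HW LRO, return the first ring whose next descriptor is marked done and
 * flag it for a CPU index update, or NULL when no ring has work pending.
 */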
1844 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1845 {
1846 	int i;
1847 	struct mtk_rx_ring *ring;
1848 	int idx;
1849 
1850 	if (!eth->hwlro)
1851 		return &eth->rx_ring[0];
1852 
1853 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1854 		struct mtk_rx_dma *rxd;
1855 
1856 		ring = &eth->rx_ring[i];
1857 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1858 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
1859 		if (rxd->rxd2 & RX_DMA_DONE) {
1860 			ring->calc_idx_update = true;
1861 			return ring;
1862 		}
1863 	}
1864 
1865 	return NULL;
1866 }
1867 
1868 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1869 {
1870 	struct mtk_rx_ring *ring;
1871 	int i;
1872 
1873 	if (!eth->hwlro) {
1874 		ring = &eth->rx_ring[0];
1875 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1876 	} else {
1877 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1878 			ring = &eth->rx_ring[i];
1879 			if (ring->calc_idx_update) {
1880 				ring->calc_idx_update = false;
1881 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1882 			}
1883 		}
1884 	}
1885 }
1886 
1887 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1888 {
1889 	return mtk_is_netsys_v2_or_greater(eth);
1890 }
1891 
1892 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1893 					      struct xdp_rxq_info *xdp_q,
1894 					      int id, int size)
1895 {
1896 	struct page_pool_params pp_params = {
1897 		.order = 0,
1898 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1899 		.pool_size = size,
1900 		.nid = NUMA_NO_NODE,
1901 		.dev = eth->dma_dev,
1902 		.offset = MTK_PP_HEADROOM,
1903 		.max_len = MTK_PP_MAX_BUF_SIZE,
1904 	};
1905 	struct page_pool *pp;
1906 	int err;
1907 
1908 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1909 							  : DMA_FROM_DEVICE;
1910 	pp = page_pool_create(&pp_params);
1911 	if (IS_ERR(pp))
1912 		return pp;
1913 
1914 	err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1915 				 eth->rx_napi.napi_id, PAGE_SIZE);
1916 	if (err < 0)
1917 		goto err_free_pp;
1918 
1919 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1920 	if (err)
1921 		goto err_unregister_rxq;
1922 
1923 	return pp;
1924 
1925 err_unregister_rxq:
1926 	xdp_rxq_info_unreg(xdp_q);
1927 err_free_pp:
1928 	page_pool_destroy(pp);
1929 
1930 	return ERR_PTR(err);
1931 }
1932 
1933 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1934 				    gfp_t gfp_mask)
1935 {
1936 	struct page *page;
1937 
1938 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1939 	if (!page)
1940 		return NULL;
1941 
1942 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1943 	return page_address(page);
1944 }
1945 
1946 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1947 {
1948 	if (ring->page_pool)
1949 		page_pool_put_full_page(ring->page_pool,
1950 					virt_to_head_page(data), napi);
1951 	else
1952 		skb_free_frag(data);
1953 }
1954 
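/* Fill one TX descriptor for an XDP buffer. For ndo_xdp_xmit (@dma_map set)
 * the data is freshly DMA-mapped here; for XDP_TX the page pool DMA address
 * is reused and only synced for the device.
 */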
1955 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1956 			     struct mtk_tx_dma_desc_info *txd_info,
1957 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1958 			     void *data, u16 headroom, int index, bool dma_map)
1959 {
1960 	struct mtk_tx_ring *ring = &eth->tx_ring;
1961 	struct mtk_mac *mac = netdev_priv(dev);
1962 	struct mtk_tx_dma *txd_pdma;
1963 
1964 	if (dma_map) {  /* ndo_xdp_xmit */
1965 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1966 						txd_info->size, DMA_TO_DEVICE);
1967 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1968 			return -ENOMEM;
1969 
1970 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1971 	} else {
1972 		struct page *page = virt_to_head_page(data);
1973 
1974 		txd_info->addr = page_pool_get_dma_addr(page) +
1975 				 sizeof(struct xdp_frame) + headroom;
1976 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1977 					   txd_info->size, DMA_BIDIRECTIONAL);
1978 	}
1979 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1980 
1981 	tx_buf->mac_id = mac->id;
1982 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1983 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1984 
1985 	txd_pdma = qdma_to_pdma(ring, txd);
1986 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1987 		     index);
1988 
1989 	return 0;
1990 }
1991 
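/* Queue an xdp_frame and its fragments on the TX ring and kick the DMA
 * engine. Used for both XDP_TX (@dma_map false) and ndo_xdp_xmit
 * (@dma_map true).
 */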
1992 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1993 				struct net_device *dev, bool dma_map)
1994 {
1995 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1996 	const struct mtk_soc_data *soc = eth->soc;
1997 	struct mtk_tx_ring *ring = &eth->tx_ring;
1998 	struct mtk_mac *mac = netdev_priv(dev);
1999 	struct mtk_tx_dma_desc_info txd_info = {
2000 		.size	= xdpf->len,
2001 		.first	= true,
2002 		.last	= !xdp_frame_has_frags(xdpf),
2003 		.qid	= mac->id,
2004 	};
2005 	int err, index = 0, n_desc = 1, nr_frags;
2006 	struct mtk_tx_buf *htx_buf, *tx_buf;
2007 	struct mtk_tx_dma *htxd, *txd;
2008 	void *data = xdpf->data;
2009 
2010 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2011 		return -EBUSY;
2012 
2013 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
2014 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
2015 		return -EBUSY;
2016 
2017 	spin_lock(&eth->page_lock);
2018 
2019 	txd = ring->next_free;
2020 	if (txd == ring->last_free) {
2021 		spin_unlock(&eth->page_lock);
2022 		return -ENOMEM;
2023 	}
2024 	htxd = txd;
2025 
2026 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
2027 	memset(tx_buf, 0, sizeof(*tx_buf));
2028 	htx_buf = tx_buf;
2029 
2030 	for (;;) {
2031 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
2032 					data, xdpf->headroom, index, dma_map);
2033 		if (err < 0)
2034 			goto unmap;
2035 
2036 		if (txd_info.last)
2037 			break;
2038 
2039 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
2040 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
2041 			if (txd == ring->last_free)
2042 				goto unmap;
2043 
2044 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
2045 						    soc->tx.desc_size);
2046 			memset(tx_buf, 0, sizeof(*tx_buf));
2047 			n_desc++;
2048 		}
2049 
2050 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
2051 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
2052 		txd_info.last = index + 1 == nr_frags;
2053 		txd_info.qid = mac->id;
2054 		data = skb_frag_address(&sinfo->frags[index]);
2055 
2056 		index++;
2057 	}
2058 	/* store xdpf for cleanup */
2059 	htx_buf->data = xdpf;
2060 
2061 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2062 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
2063 
2064 		if (index & 1)
2065 			txd_pdma->txd2 |= TX_DMA_LS0;
2066 		else
2067 			txd_pdma->txd2 |= TX_DMA_LS1;
2068 	}
2069 
2070 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
2071 	atomic_sub(n_desc, &ring->free_count);
2072 
2073 	/* make sure that all changes to the dma ring are flushed before we
2074 	 * continue
2075 	 */
2076 	wmb();
2077 
2078 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2079 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
2080 	} else {
2081 		int idx;
2082 
2083 		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
2084 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
2085 			MT7628_TX_CTX_IDX0);
2086 	}
2087 
2088 	spin_unlock(&eth->page_lock);
2089 
2090 	return 0;
2091 
2092 unmap:
2093 	while (htxd != txd) {
2094 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
2095 		mtk_tx_unmap(eth, tx_buf, NULL, false);
2096 
2097 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2098 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2099 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
2100 
2101 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
2102 		}
2103 
2104 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
2105 	}
2106 
2107 	spin_unlock(&eth->page_lock);
2108 
2109 	return err;
2110 }
2111 
2112 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
2113 			struct xdp_frame **frames, u32 flags)
2114 {
2115 	struct mtk_mac *mac = netdev_priv(dev);
2116 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
2117 	struct mtk_eth *eth = mac->hw;
2118 	int i, nxmit = 0;
2119 
2120 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2121 		return -EINVAL;
2122 
2123 	for (i = 0; i < num_frame; i++) {
2124 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
2125 			break;
2126 		nxmit++;
2127 	}
2128 
2129 	u64_stats_update_begin(&hw_stats->syncp);
2130 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
2131 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
2132 	u64_stats_update_end(&hw_stats->syncp);
2133 
2134 	return nxmit;
2135 }
2136 
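/* Run the attached XDP program on a received buffer. XDP_PASS lets the
 * regular RX path build an skb, XDP_TX and XDP_REDIRECT transmit the
 * buffer, and any other verdict recycles the page back into the pool.
 * The per-action counters are updated under hw_stats->syncp.
 */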
2137 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
2138 		       struct xdp_buff *xdp, struct net_device *dev)
2139 {
2140 	struct mtk_mac *mac = netdev_priv(dev);
2141 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
2142 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
2143 	struct bpf_prog *prog;
2144 	u32 act = XDP_PASS;
2145 
2146 	rcu_read_lock();
2147 
2148 	prog = rcu_dereference(eth->prog);
2149 	if (!prog)
2150 		goto out;
2151 
2152 	act = bpf_prog_run_xdp(prog, xdp);
2153 	switch (act) {
2154 	case XDP_PASS:
2155 		count = &hw_stats->xdp_stats.rx_xdp_pass;
2156 		goto update_stats;
2157 	case XDP_REDIRECT:
2158 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
2159 			act = XDP_DROP;
2160 			break;
2161 		}
2162 
2163 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
2164 		goto update_stats;
2165 	case XDP_TX: {
2166 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2167 
2168 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
2169 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
2170 			act = XDP_DROP;
2171 			break;
2172 		}
2173 
2174 		count = &hw_stats->xdp_stats.rx_xdp_tx;
2175 		goto update_stats;
2176 	}
2177 	default:
2178 		bpf_warn_invalid_xdp_action(dev, prog, act);
2179 		fallthrough;
2180 	case XDP_ABORTED:
2181 		trace_xdp_exception(dev, prog, act);
2182 		fallthrough;
2183 	case XDP_DROP:
2184 		break;
2185 	}
2186 
2187 	page_pool_put_full_page(ring->page_pool,
2188 				virt_to_head_page(xdp->data), true);
2189 
2190 update_stats:
2191 	u64_stats_update_begin(&hw_stats->syncp);
2192 	*count = *count + 1;
2193 	u64_stats_update_end(&hw_stats->syncp);
2194 out:
2195 	rcu_read_unlock();
2196 
2197 	return act;
2198 }
2199 
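/* RX NAPI poll: reap up to @budget completed descriptors, run XDP when a
 * program is attached, hand skbs to the stack via napi_gro_receive() and
 * refill each serviced slot with a fresh buffer before advancing the CPU
 * index register.
 */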
2200 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2201 		       struct mtk_eth *eth)
2202 {
2203 	struct dim_sample dim_sample = {};
2204 	struct mtk_rx_ring *ring;
2205 	bool xdp_flush = false;
2206 	int idx;
2207 	struct sk_buff *skb;
2208 	u64 addr64 = 0;
2209 	u8 *data, *new_data;
2210 	struct mtk_rx_dma_v2 *rxd, trxd;
2211 	int done = 0, bytes = 0;
2212 	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2213 	int ppe_idx = 0;
2214 
2215 	while (done < budget) {
2216 		unsigned int pktlen, *rxdcsum;
2217 		struct net_device *netdev;
2218 		u32 hash, reason;
2219 		int mac = 0;
2220 
2221 		ring = mtk_get_rx_ring(eth);
2222 		if (unlikely(!ring))
2223 			goto rx_done;
2224 
2225 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2226 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
2227 		data = ring->data[idx];
2228 
2229 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
2230 			break;
2231 
2232 		/* find out which mac the packet comes from. values start at 1 */
2233 		if (mtk_is_netsys_v3_or_greater(eth)) {
2234 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2235 
2236 			switch (val) {
2237 			case PSE_GDM1_PORT:
2238 			case PSE_GDM2_PORT:
2239 				mac = val - 1;
2240 				break;
2241 			case PSE_GDM3_PORT:
2242 				mac = MTK_GMAC3_ID;
2243 				break;
2244 			default:
2245 				break;
2246 			}
2247 		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2248 			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2249 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2250 		}
2251 
2252 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2253 			     !eth->netdev[mac]))
2254 			goto release_desc;
2255 
2256 		netdev = eth->netdev[mac];
2257 		ppe_idx = eth->mac[mac]->ppe_idx;
2258 
2259 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2260 			goto release_desc;
2261 
2262 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2263 
2264 		/* alloc new buffer */
2265 		if (ring->page_pool) {
2266 			struct page *page = virt_to_head_page(data);
2267 			struct xdp_buff xdp;
2268 			u32 ret, metasize;
2269 
2270 			new_data = mtk_page_pool_get_buff(ring->page_pool,
2271 							  &dma_addr,
2272 							  GFP_ATOMIC);
2273 			if (unlikely(!new_data)) {
2274 				netdev->stats.rx_dropped++;
2275 				goto release_desc;
2276 			}
2277 
2278 			dma_sync_single_for_cpu(eth->dma_dev,
2279 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2280 				pktlen, page_pool_get_dma_dir(ring->page_pool));
2281 
2282 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2283 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2284 					 true);
2285 			xdp_buff_clear_frags_flag(&xdp);
2286 
2287 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2288 			if (ret == XDP_REDIRECT)
2289 				xdp_flush = true;
2290 
2291 			if (ret != XDP_PASS)
2292 				goto skip_rx;
2293 
2294 			skb = build_skb(data, PAGE_SIZE);
2295 			if (unlikely(!skb)) {
2296 				page_pool_put_full_page(ring->page_pool,
2297 							page, true);
2298 				netdev->stats.rx_dropped++;
2299 				goto skip_rx;
2300 			}
2301 
2302 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
2303 			skb_put(skb, xdp.data_end - xdp.data);
2304 			metasize = xdp.data - xdp.data_meta;
2305 			if (metasize)
2306 				skb_metadata_set(skb, metasize);
2307 			skb_mark_for_recycle(skb);
2308 		} else {
2309 			if (ring->frag_size <= PAGE_SIZE)
2310 				new_data = napi_alloc_frag(ring->frag_size);
2311 			else
2312 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2313 
2314 			if (unlikely(!new_data)) {
2315 				netdev->stats.rx_dropped++;
2316 				goto release_desc;
2317 			}
2318 
2319 			dma_addr = dma_map_single(eth->dma_dev,
2320 				new_data + NET_SKB_PAD + eth->ip_align,
2321 				ring->buf_size, DMA_FROM_DEVICE);
2322 			if (unlikely(dma_mapping_error(eth->dma_dev,
2323 						       dma_addr))) {
2324 				skb_free_frag(new_data);
2325 				netdev->stats.rx_dropped++;
2326 				goto release_desc;
2327 			}
2328 
2329 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2330 				addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2331 
2332 			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2333 					 ring->buf_size, DMA_FROM_DEVICE);
2334 
2335 			skb = build_skb(data, ring->frag_size);
2336 			if (unlikely(!skb)) {
2337 				netdev->stats.rx_dropped++;
2338 				skb_free_frag(data);
2339 				goto skip_rx;
2340 			}
2341 
2342 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2343 			skb_put(skb, pktlen);
2344 		}
2345 
2346 		skb->dev = netdev;
2347 		bytes += skb->len;
2348 
2349 		if (mtk_is_netsys_v3_or_greater(eth)) {
2350 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2351 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2352 			if (hash != MTK_RXD5_FOE_ENTRY)
2353 				skb_set_hash(skb, jhash_1word(hash, 0),
2354 					     PKT_HASH_TYPE_L4);
2355 			rxdcsum = &trxd.rxd3;
2356 		} else {
2357 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2358 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2359 			if (hash != MTK_RXD4_FOE_ENTRY)
2360 				skb_set_hash(skb, jhash_1word(hash, 0),
2361 					     PKT_HASH_TYPE_L4);
2362 			rxdcsum = &trxd.rxd4;
2363 		}
2364 
2365 		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2366 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2367 		else
2368 			skb_checksum_none_assert(skb);
2369 		skb->protocol = eth_type_trans(skb, netdev);
2370 
2371 		/* When using VLAN untagging in combination with DSA, the
2372 		 * hardware treats the MTK special tag as a VLAN and untags it.
2373 		 */
2374 		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2375 		    netdev_uses_dsa(netdev)) {
2376 			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2377 
2378 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2379 			    eth->dsa_meta[port])
2380 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2381 		}
2382 
2383 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2384 			mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
2385 
2386 		skb_record_rx_queue(skb, 0);
2387 		napi_gro_receive(napi, skb);
2388 
2389 skip_rx:
2390 		ring->data[idx] = new_data;
2391 		rxd->rxd1 = (unsigned int)dma_addr;
2392 release_desc:
2393 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
2394 			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
2395 				addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
2396 						   rxd->rxd2);
2397 			else
2398 				addr64 = RX_DMA_PREP_ADDR64(dma_addr);
2399 		}
2400 
2401 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2402 			rxd->rxd2 = RX_DMA_LSO;
2403 		else
2404 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
2405 
2406 		ring->calc_idx = idx;
2407 		done++;
2408 	}
2409 
2410 rx_done:
2411 	if (done) {
2412 		/* make sure that all changes to the dma ring are flushed before
2413 		 * we continue
2414 		 */
2415 		wmb();
2416 		mtk_update_rx_cpu_idx(eth);
2417 	}
2418 
2419 	eth->rx_packets += done;
2420 	eth->rx_bytes += bytes;
2421 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2422 			  &dim_sample);
2423 	net_dim(&eth->rx_dim, &dim_sample);
2424 
2425 	if (xdp_flush)
2426 		xdp_do_flush();
2427 
2428 	return done;
2429 }
2430 
2431 struct mtk_poll_state {
2432     struct netdev_queue *txq;
2433     unsigned int total;
2434     unsigned int done;
2435     unsigned int bytes;
2436 };
2437 
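/* Account one completed TX packet and batch the BQL updates: consecutive
 * completions for the same netdev queue are accumulated and flushed with a
 * single netdev_tx_completed_queue() call.
 */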
2438 static void
2439 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2440 		 struct sk_buff *skb)
2441 {
2442 	struct netdev_queue *txq;
2443 	struct net_device *dev;
2444 	unsigned int bytes = skb->len;
2445 
2446 	state->total++;
2447 	eth->tx_packets++;
2448 	eth->tx_bytes += bytes;
2449 
2450 	dev = eth->netdev[mac];
2451 	if (!dev)
2452 		return;
2453 
2454 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2455 	if (state->txq == txq) {
2456 		state->done++;
2457 		state->bytes += bytes;
2458 		return;
2459 	}
2460 
2461 	if (state->txq)
2462 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2463 
2464 	state->txq = txq;
2465 	state->done = 1;
2466 	state->bytes = bytes;
2467 }
2468 
2469 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2470 			    struct mtk_poll_state *state)
2471 {
2472 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2473 	struct mtk_tx_ring *ring = &eth->tx_ring;
2474 	struct mtk_tx_buf *tx_buf;
2475 	struct xdp_frame_bulk bq;
2476 	struct mtk_tx_dma *desc;
2477 	u32 cpu, dma;
2478 
2479 	cpu = ring->last_free_ptr;
2480 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2481 
2482 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2483 	xdp_frame_bulk_init(&bq);
2484 
2485 	while ((cpu != dma) && budget) {
2486 		u32 next_cpu = desc->txd2;
2487 
2488 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2489 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2490 			break;
2491 
2492 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2493 					    eth->soc->tx.desc_size);
2494 		if (!tx_buf->data)
2495 			break;
2496 
2497 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2498 			if (tx_buf->type == MTK_TYPE_SKB)
2499 				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2500 						 tx_buf->data);
2501 
2502 			budget--;
2503 		}
2504 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2505 
2506 		ring->last_free = desc;
2507 		atomic_inc(&ring->free_count);
2508 
2509 		cpu = next_cpu;
2510 	}
2511 	xdp_flush_frame_bulk(&bq);
2512 
2513 	ring->last_free_ptr = cpu;
2514 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2515 
2516 	return budget;
2517 }
2518 
2519 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2520 			    struct mtk_poll_state *state)
2521 {
2522 	struct mtk_tx_ring *ring = &eth->tx_ring;
2523 	struct mtk_tx_buf *tx_buf;
2524 	struct xdp_frame_bulk bq;
2525 	struct mtk_tx_dma *desc;
2526 	u32 cpu, dma;
2527 
2528 	cpu = ring->cpu_idx;
2529 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2530 	xdp_frame_bulk_init(&bq);
2531 
2532 	while ((cpu != dma) && budget) {
2533 		tx_buf = &ring->buf[cpu];
2534 		if (!tx_buf->data)
2535 			break;
2536 
2537 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2538 			if (tx_buf->type == MTK_TYPE_SKB)
2539 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2540 			budget--;
2541 		}
2542 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2543 
2544 		desc = ring->dma + cpu * eth->soc->tx.desc_size;
2545 		ring->last_free = desc;
2546 		atomic_inc(&ring->free_count);
2547 
2548 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2549 	}
2550 	xdp_flush_frame_bulk(&bq);
2551 
2552 	ring->cpu_idx = cpu;
2553 
2554 	return budget;
2555 }
2556 
2557 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2558 {
2559 	struct mtk_tx_ring *ring = &eth->tx_ring;
2560 	struct dim_sample dim_sample = {};
2561 	struct mtk_poll_state state = {};
2562 
2563 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2564 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2565 	else
2566 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2567 
2568 	if (state.txq)
2569 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2570 
2571 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2572 			  &dim_sample);
2573 	net_dim(&eth->tx_dim, &dim_sample);
2574 
2575 	if (mtk_queue_stopped(eth) &&
2576 	    (atomic_read(&ring->free_count) > ring->thresh))
2577 		mtk_wake_queue(eth);
2578 
2579 	return state.total;
2580 }
2581 
2582 static void mtk_handle_status_irq(struct mtk_eth *eth)
2583 {
2584 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2585 
2586 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2587 		mtk_stats_update(eth);
2588 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2589 			MTK_INT_STATUS2);
2590 	}
2591 }
2592 
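/* TX NAPI poll: acknowledge the TX-done interrupt, reclaim completed
 * descriptors and re-enable the interrupt once the ring is fully drained.
 */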
2593 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2594 {
2595 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2596 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2597 	int tx_done = 0;
2598 
2599 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2600 		mtk_handle_status_irq(eth);
2601 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2602 	tx_done = mtk_poll_tx(eth, budget);
2603 
2604 	if (unlikely(netif_msg_intr(eth))) {
2605 		dev_info(eth->dev,
2606 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2607 			 mtk_r32(eth, reg_map->tx_irq_status),
2608 			 mtk_r32(eth, reg_map->tx_irq_mask));
2609 	}
2610 
2611 	if (tx_done == budget)
2612 		return budget;
2613 
2614 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2615 		return budget;
2616 
2617 	if (napi_complete_done(napi, tx_done))
2618 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2619 
2620 	return tx_done;
2621 }
2622 
2623 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2624 {
2625 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2626 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2627 	int rx_done_total = 0;
2628 
2629 	mtk_handle_status_irq(eth);
2630 
2631 	do {
2632 		int rx_done;
2633 
2634 		mtk_w32(eth, eth->soc->rx.irq_done_mask,
2635 			reg_map->pdma.irq_status);
2636 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2637 		rx_done_total += rx_done;
2638 
2639 		if (unlikely(netif_msg_intr(eth))) {
2640 			dev_info(eth->dev,
2641 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2642 				 mtk_r32(eth, reg_map->pdma.irq_status),
2643 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2644 		}
2645 
2646 		if (rx_done_total == budget)
2647 			return budget;
2648 
2649 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2650 		 eth->soc->rx.irq_done_mask);
2651 
2652 	if (napi_complete_done(napi, rx_done_total))
2653 		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2654 
2655 	return rx_done_total;
2656 }
2657 
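/* Allocate the TX descriptor ring and its buffer tracking array. QDMA
 * descriptors are chained through txd2; PDMA-only SoCs additionally get the
 * real hardware ring in ring->dma_pdma. Finally program the ring base,
 * index and (for QDMA) per-queue scheduler registers.
 */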
2658 static int mtk_tx_alloc(struct mtk_eth *eth)
2659 {
2660 	const struct mtk_soc_data *soc = eth->soc;
2661 	struct mtk_tx_ring *ring = &eth->tx_ring;
2662 	int i, sz = soc->tx.desc_size;
2663 	struct mtk_tx_dma_v2 *txd;
2664 	int ring_size;
2665 	u32 ofs, val;
2666 
2667 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2668 		ring_size = MTK_QDMA_RING_SIZE;
2669 	else
2670 		ring_size = soc->tx.dma_size;
2671 
2672 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf), GFP_KERNEL);
2673 	if (!ring->buf)
2674 		goto no_tx_mem;
2675 
2676 	ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
2677 	if (!ring->dma)
2678 		goto no_tx_mem;
2679 
2680 	for (i = 0; i < ring_size; i++) {
2681 		int next = (i + 1) % ring_size;
2682 		u32 next_ptr = ring->phys + next * sz;
2683 
2684 		txd = ring->dma + i * sz;
2685 		txd->txd2 = next_ptr;
2686 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2687 		txd->txd4 = 0;
2688 		if (mtk_is_netsys_v2_or_greater(eth)) {
2689 			txd->txd5 = 0;
2690 			txd->txd6 = 0;
2691 			txd->txd7 = 0;
2692 			txd->txd8 = 0;
2693 		}
2694 	}
2695 
2696 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2697 	 * only as the framework. The real HW descriptors are the PDMA
2698 	 * descriptors in ring->dma_pdma.
2699 	 */
2700 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2701 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2702 						    &ring->phys_pdma, GFP_KERNEL);
2703 		if (!ring->dma_pdma)
2704 			goto no_tx_mem;
2705 
2706 		for (i = 0; i < ring_size; i++) {
2707 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2708 			ring->dma_pdma[i].txd4 = 0;
2709 		}
2710 	}
2711 
2712 	ring->dma_size = ring_size;
2713 	atomic_set(&ring->free_count, ring_size - 2);
2714 	ring->next_free = ring->dma;
2715 	ring->last_free = (void *)txd;
2716 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2717 	ring->thresh = MAX_SKB_FRAGS;
2718 
2719 	/* make sure that all changes to the dma ring are flushed before we
2720 	 * continue
2721 	 */
2722 	wmb();
2723 
2724 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2725 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2726 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2727 		mtk_w32(eth,
2728 			ring->phys + ((ring_size - 1) * sz),
2729 			soc->reg_map->qdma.crx_ptr);
2730 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2731 
2732 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2733 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2734 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2735 
2736 			val = MTK_QTX_SCH_MIN_RATE_EN |
2737 			      /* minimum: 10 Mbps */
2738 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2739 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2740 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2741 			if (mtk_is_netsys_v1(eth))
2742 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2743 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2744 			ofs += MTK_QTX_OFFSET;
2745 		}
2746 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2747 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2748 		if (mtk_is_netsys_v2_or_greater(eth))
2749 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2750 	} else {
2751 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2752 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2753 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2754 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2755 	}
2756 
2757 	return 0;
2758 
2759 no_tx_mem:
2760 	return -ENOMEM;
2761 }
2762 
2763 static void mtk_tx_clean(struct mtk_eth *eth)
2764 {
2765 	const struct mtk_soc_data *soc = eth->soc;
2766 	struct mtk_tx_ring *ring = &eth->tx_ring;
2767 	int i;
2768 
2769 	if (ring->buf) {
2770 		for (i = 0; i < ring->dma_size; i++)
2771 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2772 		kfree(ring->buf);
2773 		ring->buf = NULL;
2774 	}
2775 
2776 	if (ring->dma) {
2777 		mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
2778 				  ring->dma, ring->phys, true);
2779 		ring->dma = NULL;
2780 	}
2781 
2782 	if (ring->dma_pdma) {
2783 		dma_free_coherent(eth->dma_dev,
2784 				  ring->dma_size * soc->tx.desc_size,
2785 				  ring->dma_pdma, ring->phys_pdma);
2786 		ring->dma_pdma = NULL;
2787 	}
2788 }
2789 
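/* Allocate one RX ring (normal, HW LRO or QDMA flavour), pre-fill every
 * descriptor with a receive buffer - page pool backed on NETSYS v2 and
 * newer - and program the ring base, size and CPU index registers.
 */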
2790 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2791 {
2792 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2793 	const struct mtk_soc_data *soc = eth->soc;
2794 	struct mtk_rx_ring *ring;
2795 	int rx_data_len, rx_dma_size;
2796 	int i;
2797 
2798 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2799 		if (ring_no)
2800 			return -EINVAL;
2801 		ring = &eth->rx_ring_qdma;
2802 	} else {
2803 		ring = &eth->rx_ring[ring_no];
2804 	}
2805 
2806 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2807 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2808 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2809 	} else {
2810 		rx_data_len = ETH_DATA_LEN;
2811 		rx_dma_size = soc->rx.dma_size;
2812 	}
2813 
2814 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2815 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2816 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2817 			     GFP_KERNEL);
2818 	if (!ring->data)
2819 		return -ENOMEM;
2820 
2821 	if (mtk_page_pool_enabled(eth)) {
2822 		struct page_pool *pp;
2823 
2824 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2825 					  rx_dma_size);
2826 		if (IS_ERR(pp))
2827 			return PTR_ERR(pp);
2828 
2829 		ring->page_pool = pp;
2830 	}
2831 
2832 	ring->dma = mtk_dma_ring_alloc(eth,
2833 				       rx_dma_size * eth->soc->rx.desc_size,
2834 				       &ring->phys,
2835 				       rx_flag == MTK_RX_FLAGS_NORMAL);
2836 	if (!ring->dma)
2837 		return -ENOMEM;
2838 
2839 	for (i = 0; i < rx_dma_size; i++) {
2840 		struct mtk_rx_dma_v2 *rxd;
2841 		dma_addr_t dma_addr;
2842 		void *data;
2843 
2844 		rxd = ring->dma + i * eth->soc->rx.desc_size;
2845 		if (ring->page_pool) {
2846 			data = mtk_page_pool_get_buff(ring->page_pool,
2847 						      &dma_addr, GFP_KERNEL);
2848 			if (!data)
2849 				return -ENOMEM;
2850 		} else {
2851 			if (ring->frag_size <= PAGE_SIZE)
2852 				data = netdev_alloc_frag(ring->frag_size);
2853 			else
2854 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2855 
2856 			if (!data)
2857 				return -ENOMEM;
2858 
2859 			dma_addr = dma_map_single(eth->dma_dev,
2860 				data + NET_SKB_PAD + eth->ip_align,
2861 				ring->buf_size, DMA_FROM_DEVICE);
2862 			if (unlikely(dma_mapping_error(eth->dma_dev,
2863 						       dma_addr))) {
2864 				skb_free_frag(data);
2865 				return -ENOMEM;
2866 			}
2867 		}
2868 		rxd->rxd1 = (unsigned int)dma_addr;
2869 		ring->data[i] = data;
2870 
2871 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2872 			rxd->rxd2 = RX_DMA_LSO;
2873 		else
2874 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2875 
2876 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2877 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2878 
2879 		rxd->rxd3 = 0;
2880 		rxd->rxd4 = 0;
2881 		if (mtk_is_netsys_v3_or_greater(eth)) {
2882 			rxd->rxd5 = 0;
2883 			rxd->rxd6 = 0;
2884 			rxd->rxd7 = 0;
2885 			rxd->rxd8 = 0;
2886 		}
2887 	}
2888 
2889 	ring->dma_size = rx_dma_size;
2890 	ring->calc_idx_update = false;
2891 	ring->calc_idx = rx_dma_size - 1;
2892 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2893 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2894 				    ring_no * MTK_QRX_OFFSET;
2895 	else
2896 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2897 				    ring_no * MTK_QRX_OFFSET;
2898 	/* make sure that all changes to the dma ring are flushed before we
2899 	 * continue
2900 	 */
2901 	wmb();
2902 
2903 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2904 		mtk_w32(eth, ring->phys,
2905 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2906 		mtk_w32(eth, rx_dma_size,
2907 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2908 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2909 			reg_map->qdma.rst_idx);
2910 	} else {
2911 		mtk_w32(eth, ring->phys,
2912 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2913 		mtk_w32(eth, rx_dma_size,
2914 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2915 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2916 			reg_map->pdma.rst_idx);
2917 	}
2918 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2919 
2920 	return 0;
2921 }
2922 
2923 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2924 {
2925 	u64 addr64 = 0;
2926 	int i;
2927 
2928 	if (ring->data && ring->dma) {
2929 		for (i = 0; i < ring->dma_size; i++) {
2930 			struct mtk_rx_dma *rxd;
2931 
2932 			if (!ring->data[i])
2933 				continue;
2934 
2935 			rxd = ring->dma + i * eth->soc->rx.desc_size;
2936 			if (!rxd->rxd1)
2937 				continue;
2938 
2939 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2940 				addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2941 
2942 			dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2943 					 ring->buf_size, DMA_FROM_DEVICE);
2944 			mtk_rx_put_buff(ring, ring->data[i], false);
2945 		}
2946 		kfree(ring->data);
2947 		ring->data = NULL;
2948 	}
2949 
2950 	if (ring->dma) {
2951 		mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
2952 				  ring->dma, ring->phys, in_sram);
2953 		ring->dma = NULL;
2954 	}
2955 
2956 	if (ring->page_pool) {
2957 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2958 			xdp_rxq_info_unreg(&ring->xdp_q);
2959 		page_pool_destroy(ring->page_pool);
2960 		ring->page_pool = NULL;
2961 	}
2962 }
2963 
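/* Configure the HW LRO engine: put the LRO rings into auto-learn mode, set
 * the aging and aggregation timers and limits, the bandwidth threshold and
 * the auto-learn score delta, then enable LRO.
 */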
2964 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2965 {
2966 	int i;
2967 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2968 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2969 
2970 	/* set LRO rings to auto-learn modes */
2971 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2972 
2973 	/* validate LRO ring */
2974 	ring_ctrl_dw2 |= MTK_RING_VLD;
2975 
2976 	/* set AGE timer (unit: 20us) */
2977 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2978 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2979 
2980 	/* set max AGG timer (unit: 20us) */
2981 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2982 
2983 	/* set max LRO AGG count */
2984 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2985 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2986 
2987 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2988 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2989 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2990 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2991 	}
2992 
2993 	/* IPv4 checksum update enable */
2994 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2995 
2996 	/* switch priority comparison to packet count mode */
2997 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2998 
2999 	/* bandwidth threshold setting */
3000 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
3001 
3002 	/* auto-learn score delta setting */
3003 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
3004 
3005 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
3006 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
3007 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
3008 
3009 	/* set HW LRO mode & the max aggregation count for rx packets */
3010 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
3011 
3012 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
3013 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
3014 
3015 	/* enable HW LRO */
3016 	lro_ctrl_dw0 |= MTK_LRO_EN;
3017 
3018 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
3019 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
3020 
3021 	return 0;
3022 }
3023 
3024 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
3025 {
3026 	int i;
3027 	u32 val;
3028 
3029 	/* relinquish lro rings, flush aggregated packets */
3030 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
3031 
3032 	/* wait for relinquishments done */
3033 	for (i = 0; i < 10; i++) {
3034 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
3035 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
3036 			msleep(20);
3037 			continue;
3038 		}
3039 		break;
3040 	}
3041 
3042 	/* invalidate lro rings */
3043 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3044 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
3045 
3046 	/* disable HW LRO */
3047 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
3048 }
3049 
3050 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
3051 {
3052 	u32 reg_val;
3053 
3054 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3055 
3056 	/* invalidate the IP setting */
3057 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3058 
3059 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
3060 
3061 	/* validate the IP setting */
3062 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3063 }
3064 
3065 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
3066 {
3067 	u32 reg_val;
3068 
3069 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
3070 
3071 	/* invalidate the IP setting */
3072 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
3073 
3074 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
3075 }
3076 
3077 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
3078 {
3079 	int cnt = 0;
3080 	int i;
3081 
3082 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3083 		if (mac->hwlro_ip[i])
3084 			cnt++;
3085 	}
3086 
3087 	return cnt;
3088 }
3089 
3090 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
3091 				struct ethtool_rxnfc *cmd)
3092 {
3093 	struct ethtool_rx_flow_spec *fsp =
3094 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3095 	struct mtk_mac *mac = netdev_priv(dev);
3096 	struct mtk_eth *eth = mac->hw;
3097 	int hwlro_idx;
3098 
3099 	if ((fsp->flow_type != TCP_V4_FLOW) ||
3100 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
3101 	    (fsp->location > 1))
3102 		return -EINVAL;
3103 
3104 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
3105 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3106 
3107 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3108 
3109 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
3110 
3111 	return 0;
3112 }
3113 
3114 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
3115 				struct ethtool_rxnfc *cmd)
3116 {
3117 	struct ethtool_rx_flow_spec *fsp =
3118 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3119 	struct mtk_mac *mac = netdev_priv(dev);
3120 	struct mtk_eth *eth = mac->hw;
3121 	int hwlro_idx;
3122 
3123 	if (fsp->location > 1)
3124 		return -EINVAL;
3125 
3126 	mac->hwlro_ip[fsp->location] = 0;
3127 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
3128 
3129 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3130 
3131 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3132 
3133 	return 0;
3134 }
3135 
3136 static void mtk_hwlro_netdev_disable(struct net_device *dev)
3137 {
3138 	struct mtk_mac *mac = netdev_priv(dev);
3139 	struct mtk_eth *eth = mac->hw;
3140 	int i, hwlro_idx;
3141 
3142 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3143 		mac->hwlro_ip[i] = 0;
3144 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
3145 
3146 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
3147 	}
3148 
3149 	mac->hwlro_ip_cnt = 0;
3150 }
3151 
3152 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
3153 				    struct ethtool_rxnfc *cmd)
3154 {
3155 	struct mtk_mac *mac = netdev_priv(dev);
3156 	struct ethtool_rx_flow_spec *fsp =
3157 		(struct ethtool_rx_flow_spec *)&cmd->fs;
3158 
3159 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
3160 		return -EINVAL;
3161 
3162 	/* only tcp dst ipv4 is meaningful, others are meaningless */
3163 	fsp->flow_type = TCP_V4_FLOW;
3164 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
3165 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
3166 
3167 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
3168 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
3169 	fsp->h_u.tcp_ip4_spec.psrc = 0;
3170 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
3171 	fsp->h_u.tcp_ip4_spec.pdst = 0;
3172 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
3173 	fsp->h_u.tcp_ip4_spec.tos = 0;
3174 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
3175 
3176 	return 0;
3177 }
3178 
3179 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3180 				  struct ethtool_rxnfc *cmd,
3181 				  u32 *rule_locs)
3182 {
3183 	struct mtk_mac *mac = netdev_priv(dev);
3184 	int cnt = 0;
3185 	int i;
3186 
3187 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3188 		if (cnt == cmd->rule_cnt)
3189 			return -EMSGSIZE;
3190 
3191 		if (mac->hwlro_ip[i]) {
3192 			rule_locs[cnt] = i;
3193 			cnt++;
3194 		}
3195 	}
3196 
3197 	cmd->rule_cnt = cnt;
3198 
3199 	return 0;
3200 }
3201 
3202 static netdev_features_t mtk_fix_features(struct net_device *dev,
3203 					  netdev_features_t features)
3204 {
3205 	if (!(features & NETIF_F_LRO)) {
3206 		struct mtk_mac *mac = netdev_priv(dev);
3207 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3208 
3209 		if (ip_cnt) {
3210 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3211 
3212 			features |= NETIF_F_LRO;
3213 		}
3214 	}
3215 
3216 	return features;
3217 }
3218 
3219 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3220 {
3221 	netdev_features_t diff = dev->features ^ features;
3222 
3223 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3224 		mtk_hwlro_netdev_disable(dev);
3225 
3226 	return 0;
3227 }
3228 
3229 /* wait for DMA to finish whatever it is doing before we start using it again */
3230 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3231 {
3232 	unsigned int reg;
3233 	int ret;
3234 	u32 val;
3235 
3236 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3237 		reg = eth->soc->reg_map->qdma.glo_cfg;
3238 	else
3239 		reg = eth->soc->reg_map->pdma.glo_cfg;
3240 
3241 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3242 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3243 					5, MTK_DMA_BUSY_TIMEOUT_US);
3244 	if (ret)
3245 		dev_err(eth->dev, "DMA init timeout\n");
3246 
3247 	return ret;
3248 }
3249 
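/* Bring up all DMA resources: the QDMA scratch/FQ memory, the TX ring, the
 * QDMA and PDMA RX rings and, when HW LRO is enabled, the LRO rings;
 * finally set the QDMA flow-control thresholds.
 */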
3250 static int mtk_dma_init(struct mtk_eth *eth)
3251 {
3252 	int err;
3253 	u32 i;
3254 
3255 	if (mtk_dma_busy_wait(eth))
3256 		return -EBUSY;
3257 
3258 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3259 		/* QDMA needs scratch memory for internal reordering of the
3260 		 * descriptors
3261 		 */
3262 		err = mtk_init_fq_dma(eth);
3263 		if (err)
3264 			return err;
3265 	}
3266 
3267 	err = mtk_tx_alloc(eth);
3268 	if (err)
3269 		return err;
3270 
3271 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3272 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3273 		if (err)
3274 			return err;
3275 	}
3276 
3277 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3278 	if (err)
3279 		return err;
3280 
3281 	if (eth->hwlro) {
3282 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3283 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3284 			if (err)
3285 				return err;
3286 		}
3287 		err = mtk_hwlro_rx_init(eth);
3288 		if (err)
3289 			return err;
3290 	}
3291 
3292 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3293 		/* Enable random early drop and set drop threshold
3294 		 * automatically
3295 		 */
3296 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3297 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3298 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3299 	}
3300 
3301 	return 0;
3302 }
3303 
3304 static void mtk_dma_free(struct mtk_eth *eth)
3305 {
3306 	const struct mtk_soc_data *soc = eth->soc;
3307 	int i, j, txqs = 1;
3308 
3309 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3310 		txqs = MTK_QDMA_NUM_QUEUES;
3311 
3312 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3313 		if (!eth->netdev[i])
3314 			continue;
3315 
3316 		for (j = 0; j < txqs; j++)
3317 			netdev_tx_reset_subqueue(eth->netdev[i], j);
3318 	}
3319 
3320 	if (eth->scratch_ring) {
3321 		mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
3322 				  eth->scratch_ring, eth->phy_scratch_ring,
3323 				  true);
3324 		eth->scratch_ring = NULL;
3325 		eth->phy_scratch_ring = 0;
3326 	}
3327 
3328 	mtk_tx_clean(eth);
3329 	mtk_rx_clean(eth, &eth->rx_ring[0], true);
3330 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3331 
3332 	if (eth->hwlro) {
3333 		mtk_hwlro_rx_uninit(eth);
3334 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3335 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3336 	}
3337 
3338 	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3339 		kfree(eth->scratch_head[i]);
3340 		eth->scratch_head[i] = NULL;
3341 	}
3342 }
3343 
3344 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3345 {
3346 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3347 
3348 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3349 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3350 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3351 }
3352 
3353 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3354 {
3355 	struct mtk_mac *mac = netdev_priv(dev);
3356 	struct mtk_eth *eth = mac->hw;
3357 
3358 	if (test_bit(MTK_RESETTING, &eth->state))
3359 		return;
3360 
3361 	if (!mtk_hw_reset_check(eth))
3362 		return;
3363 
3364 	eth->netdev[mac->id]->stats.tx_errors++;
3365 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3366 
3367 	schedule_work(&eth->pending_work);
3368 }
3369 
3370 static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
3371 {
3372 	int i;
3373 
3374 	/* future SoCs beginning with MT7988 should use named IRQs in dts */
3375 	eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
3376 	eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
3377 	if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
3378 		return 0;
3379 
3380 	/* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
3381 	if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
3382 		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
3383 				     "Error requesting FE TX IRQ\n");
3384 
3385 	if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
3386 		return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
3387 				     "Error requesting FE RX IRQ\n");
3388 
3389 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
3390 		dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");
3391 
3392 	/* legacy way:
3393 	 * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
3394 	 * from devicetree and used for both RX and TX - it is shared.
3395 	 * On SoCs with non-shared IRQs the first entry is not used,
3396 	 * the second is for TX, and the third is for RX.
3397 	 */
3398 	for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
3399 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3400 			if (i == MTK_FE_IRQ_SHARED)
3401 				eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
3402 			else
3403 				eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
3404 		} else {
3405 			eth->irq[i] = platform_get_irq(pdev, i + 1);
3406 		}
3407 
3408 		if (eth->irq[i] < 0) {
3409 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3410 			return -ENXIO;
3411 		}
3412 	}
3413 
3414 	return 0;
3415 }
3416 
3417 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3418 {
3419 	struct mtk_eth *eth = _eth;
3420 
3421 	eth->rx_events++;
3422 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3423 		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3424 		__napi_schedule(&eth->rx_napi);
3425 	}
3426 
3427 	return IRQ_HANDLED;
3428 }
3429 
3430 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3431 {
3432 	struct mtk_eth *eth = _eth;
3433 
3434 	eth->tx_events++;
3435 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3436 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3437 		__napi_schedule(&eth->tx_napi);
3438 	}
3439 
3440 	return IRQ_HANDLED;
3441 }
3442 
3443 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3444 {
3445 	struct mtk_eth *eth = _eth;
3446 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3447 
3448 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3449 	    eth->soc->rx.irq_done_mask) {
3450 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3451 		    eth->soc->rx.irq_done_mask)
3452 			mtk_handle_irq_rx(irq, _eth);
3453 	}
3454 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3455 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3456 			mtk_handle_irq_tx(irq, _eth);
3457 	}
3458 
3459 	return IRQ_HANDLED;
3460 }
3461 
3462 #ifdef CONFIG_NET_POLL_CONTROLLER
3463 static void mtk_poll_controller(struct net_device *dev)
3464 {
3465 	struct mtk_mac *mac = netdev_priv(dev);
3466 	struct mtk_eth *eth = mac->hw;
3467 
3468 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3469 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3470 	mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
3471 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3472 	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3473 }
3474 #endif
3475 
3476 static int mtk_start_dma(struct mtk_eth *eth)
3477 {
3478 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3479 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3480 	int err;
3481 
3482 	err = mtk_dma_init(eth);
3483 	if (err) {
3484 		mtk_dma_free(eth);
3485 		return err;
3486 	}
3487 
3488 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3489 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3490 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3491 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3492 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3493 
3494 		if (mtk_is_netsys_v2_or_greater(eth))
3495 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3496 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3497 			       MTK_CHK_DDONE_EN;
3498 		else
3499 			val |= MTK_RX_BT_32DWORDS;
3500 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3501 
3502 		mtk_w32(eth,
3503 			MTK_RX_DMA_EN | rx_2b_offset |
3504 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3505 			reg_map->pdma.glo_cfg);
3506 	} else {
3507 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3508 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3509 			reg_map->pdma.glo_cfg);
3510 	}
3511 
3512 	return 0;
3513 }
3514 
3515 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
3516 {
3517 	u32 val;
3518 
3519 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3520 		return;
3521 
3522 	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
3523 
3524 	/* default setup the forward port to send frame to PDMA */
3525 	val &= ~0xffff;
3526 
3527 	/* Enable RX checksum */
3528 	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3529 
3530 	val |= config;
3531 
3532 	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
3533 		val |= MTK_GDMA_SPECIAL_TAG;
3534 
3535 	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
3536 }
3537 
3538 
3539 static bool mtk_uses_dsa(struct net_device *dev)
3540 {
3541 #if IS_ENABLED(CONFIG_NET_DSA)
3542 	return netdev_uses_dsa(dev) &&
3543 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3544 #else
3545 	return false;
3546 #endif
3547 }
3548 
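/* netdevice notifier: when the link of a DSA user port behind this MAC
 * changes, update the TX queue shaper for that port so the QDMA scheduler
 * matches the negotiated speed.
 */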
3549 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3550 {
3551 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3552 	struct mtk_eth *eth = mac->hw;
3553 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3554 	struct ethtool_link_ksettings s;
3555 	struct net_device *ldev;
3556 	struct list_head *iter;
3557 	struct dsa_port *dp;
3558 
3559 	if (event != NETDEV_CHANGE)
3560 		return NOTIFY_DONE;
3561 
3562 	netdev_for_each_lower_dev(dev, ldev, iter) {
3563 		if (netdev_priv(ldev) == mac)
3564 			goto found;
3565 	}
3566 
3567 	return NOTIFY_DONE;
3568 
3569 found:
3570 	if (!dsa_user_dev_check(dev))
3571 		return NOTIFY_DONE;
3572 
3573 	if (__ethtool_get_link_ksettings(dev, &s))
3574 		return NOTIFY_DONE;
3575 
3576 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3577 		return NOTIFY_DONE;
3578 
3579 	dp = dsa_port_from_netdev(dev);
3580 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3581 		return NOTIFY_DONE;
3582 
3583 	if (mac->speed > 0 && mac->speed <= s.base.speed)
3584 		s.base.speed = 0;
3585 
3586 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
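	/* DSA user ports are mapped to hardware TX queues starting at index 3
	 * (see mtk_select_queue()); program the queue shaper to the speed the
	 * user port negotiated, zeroed above when the CPU port itself is not
	 * faster than the user port.
	 */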
3587 
3588 	return NOTIFY_DONE;
3589 }
3590 
3591 static int mtk_max_gmac_mtu(struct mtk_eth *eth)
3592 {
3593 	int i, max_mtu = ETH_DATA_LEN;
3594 
3595 	for (i = 0; i < ARRAY_SIZE(eth->netdev); i++)
3596 		if (eth->netdev[i] && eth->netdev[i]->mtu > max_mtu)
3597 			max_mtu = eth->netdev[i]->mtu;
3598 
3599 	return max_mtu;
3600 }
3601 
3602 static int mtk_open(struct net_device *dev)
3603 {
3604 	struct mtk_mac *mac = netdev_priv(dev);
3605 	struct mtk_eth *eth = mac->hw;
3606 	struct mtk_mac *target_mac;
3607 	int i, err, ppe_num, mtu;
3608 
3609 	ppe_num = eth->soc->ppe_num;
3610 
3611 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3612 	if (err) {
3613 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3614 			   err);
3615 		return err;
3616 	}
3617 
3618 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3619 	if (!refcount_read(&eth->dma_refcnt)) {
3620 		const struct mtk_soc_data *soc = eth->soc;
3621 		u32 gdm_config;
3622 		int i;
3623 
3624 		err = mtk_start_dma(eth);
3625 		if (err) {
3626 			phylink_disconnect_phy(mac->phylink);
3627 			return err;
3628 		}
3629 
3630 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3631 			mtk_ppe_start(eth->ppe[i]);
3632 
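		/* Bind each GDM to a PPE instance when flow offload is
		 * available: MAC2 uses PPE2 and MAC1 uses PPE1 where the SoC
		 * has that many PPEs, everything else goes through PPE0.
		 * Without offload support, frames are forwarded straight to
		 * the PDMA.
		 */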
3633 		for (i = 0; i < MTK_MAX_DEVS; i++) {
3634 			if (!eth->netdev[i])
3635 				continue;
3636 
3637 			target_mac = netdev_priv(eth->netdev[i]);
3638 			if (!soc->offload_version) {
3639 				target_mac->ppe_idx = 0;
3640 				gdm_config = MTK_GDMA_TO_PDMA;
3641 			} else if (ppe_num >= 3 && target_mac->id == 2) {
3642 				target_mac->ppe_idx = 2;
3643 				gdm_config = soc->reg_map->gdma_to_ppe[2];
3644 			} else if (ppe_num >= 2 && target_mac->id == 1) {
3645 				target_mac->ppe_idx = 1;
3646 				gdm_config = soc->reg_map->gdma_to_ppe[1];
3647 			} else {
3648 				target_mac->ppe_idx = 0;
3649 				gdm_config = soc->reg_map->gdma_to_ppe[0];
3650 			}
3651 			mtk_gdm_config(eth, target_mac->id, gdm_config);
3652 		}
3653 
3654 		mtu = mtk_max_gmac_mtu(eth);
3655 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3656 			mtk_ppe_update_mtu(eth->ppe[i], mtu);
3657 
3658 		napi_enable(&eth->tx_napi);
3659 		napi_enable(&eth->rx_napi);
3660 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3661 		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3662 		refcount_set(&eth->dma_refcnt, 1);
3663 	} else {
3664 		refcount_inc(&eth->dma_refcnt);
3665 	}
3666 
3667 	phylink_start(mac->phylink);
3668 	netif_tx_start_all_queues(dev);
3669 
3670 	if (mtk_is_netsys_v2_or_greater(eth))
3671 		return 0;
3672 
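	/* NETSYS v1 only: when this netdev sits behind an MTK-tagged DSA
	 * switch and no XDP program is attached, pre-allocate one
	 * metadata_dst per DSA port so the RX path can convey the source
	 * port for hardware DSA untagging; otherwise the special-tag and
	 * VLAN RX offloads are turned off below.
	 */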
3673 	if (mtk_uses_dsa(dev) && !eth->prog) {
3674 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3675 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3676 
3677 			if (md_dst)
3678 				continue;
3679 
3680 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3681 						    GFP_KERNEL);
3682 			if (!md_dst)
3683 				return -ENOMEM;
3684 
3685 			md_dst->u.port_info.port_id = i;
3686 			eth->dsa_meta[i] = md_dst;
3687 		}
3688 	} else {
3689 		/* Hardware DSA untagging and VLAN RX offloading need to be
3690 		 * disabled if at least one MAC does not use DSA.
3691 		 */
3692 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3693 
3694 		val &= ~MTK_CDMP_STAG_EN;
3695 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3696 
3697 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3698 	}
3699 
3700 	return 0;
3701 }
3702 
3703 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3704 {
3705 	u32 val;
3706 	int i;
3707 
3708 	/* stop the dma engine */
3709 	spin_lock_bh(&eth->page_lock);
3710 	val = mtk_r32(eth, glo_cfg);
3711 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3712 		glo_cfg);
3713 	spin_unlock_bh(&eth->page_lock);
3714 
3715 	/* wait for dma stop */
3716 	for (i = 0; i < 10; i++) {
3717 		val = mtk_r32(eth, glo_cfg);
3718 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3719 			msleep(20);
3720 			continue;
3721 		}
3722 		break;
3723 	}
3724 }
3725 
3726 static int mtk_stop(struct net_device *dev)
3727 {
3728 	struct mtk_mac *mac = netdev_priv(dev);
3729 	struct mtk_eth *eth = mac->hw;
3730 	int i;
3731 
3732 	phylink_stop(mac->phylink);
3733 
3734 	netif_tx_disable(dev);
3735 
3736 	phylink_disconnect_phy(mac->phylink);
3737 
3738 	/* only shutdown DMA if this is the last user */
3739 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3740 		return 0;
3741 
3742 	for (i = 0; i < MTK_MAX_DEVS; i++)
3743 		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
3744 
3745 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3746 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3747 	napi_disable(&eth->tx_napi);
3748 	napi_disable(&eth->rx_napi);
3749 
3750 	cancel_work_sync(&eth->rx_dim.work);
3751 	cancel_work_sync(&eth->tx_dim.work);
3752 
3753 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3754 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3755 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3756 
3757 	mtk_dma_free(eth);
3758 
3759 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3760 		mtk_ppe_stop(eth->ppe[i]);
3761 
3762 	return 0;
3763 }
3764 
3765 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3766 			 struct netlink_ext_ack *extack)
3767 {
3768 	struct mtk_mac *mac = netdev_priv(dev);
3769 	struct mtk_eth *eth = mac->hw;
3770 	struct bpf_prog *old_prog;
3771 	bool need_update;
3772 
3773 	if (eth->hwlro) {
3774 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3775 		return -EOPNOTSUPP;
3776 	}
3777 
3778 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3779 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3780 		return -EOPNOTSUPP;
3781 	}
3782 
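	/* Attaching or detaching a program requires a full ring teardown, so
	 * bounce the interface; swapping one program for another is done in
	 * place via the RCU-protected pointer.
	 */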
3783 	need_update = !!eth->prog != !!prog;
3784 	if (netif_running(dev) && need_update)
3785 		mtk_stop(dev);
3786 
3787 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3788 
3789 	if (netif_running(dev) && need_update) {
3790 		int err;
3791 
3792 		err = mtk_open(dev);
3793 		if (err) {
3794 			rcu_assign_pointer(eth->prog, old_prog);
3795 
3796 			return err;
3797 		}
3798 	}
3799 
3800 	if (old_prog)
3801 		bpf_prog_put(old_prog);
3802 
3803 	return 0;
3804 }
3805 
3806 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3807 {
3808 	switch (xdp->command) {
3809 	case XDP_SETUP_PROG:
3810 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3811 	default:
3812 		return -EINVAL;
3813 	}
3814 }
3815 
3816 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3817 {
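	/* Assert the requested reset bits, hold them for at least 1 ms, then
	 * release them and give the blocks 10 ms to come back up.
	 */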
3818 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3819 			   reset_bits,
3820 			   reset_bits);
3821 
3822 	usleep_range(1000, 1100);
3823 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3824 			   reset_bits,
3825 			   ~reset_bits);
3826 	mdelay(10);
3827 }
3828 
3829 static void mtk_clk_disable(struct mtk_eth *eth)
3830 {
3831 	int clk;
3832 
3833 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3834 		clk_disable_unprepare(eth->clks[clk]);
3835 }
3836 
3837 static int mtk_clk_enable(struct mtk_eth *eth)
3838 {
3839 	int clk, ret;
3840 
3841 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3842 		ret = clk_prepare_enable(eth->clks[clk]);
3843 		if (ret)
3844 			goto err_disable_clks;
3845 	}
3846 
3847 	return 0;
3848 
3849 err_disable_clks:
3850 	while (--clk >= 0)
3851 		clk_disable_unprepare(eth->clks[clk]);
3852 
3853 	return ret;
3854 }
3855 
3856 static void mtk_dim_rx(struct work_struct *work)
3857 {
3858 	struct dim *dim = container_of(work, struct dim, work);
3859 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3860 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3861 	struct dim_cq_moder cur_profile;
3862 	u32 val, cur;
3863 
3864 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
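	/* Translate the Net DIM profile chosen by the RX NAPI path into the
	 * PDMA/QDMA delayed-interrupt registers; the PTIME field appears to
	 * count in 20 us units, hence the divide by 20 below.
	 */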
3865 						dim->profile_ix);
3866 	spin_lock_bh(&eth->dim_lock);
3867 
3868 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3869 	val &= MTK_PDMA_DELAY_TX_MASK;
3870 	val |= MTK_PDMA_DELAY_RX_EN;
3871 
3872 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3873 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3874 
3875 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3876 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3877 
3878 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3879 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3880 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3881 
3882 	spin_unlock_bh(&eth->dim_lock);
3883 
3884 	dim->state = DIM_START_MEASURE;
3885 }
3886 
3887 static void mtk_dim_tx(struct work_struct *work)
3888 {
3889 	struct dim *dim = container_of(work, struct dim, work);
3890 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3891 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3892 	struct dim_cq_moder cur_profile;
3893 	u32 val, cur;
3894 
3895 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3896 						dim->profile_ix);
3897 	spin_lock_bh(&eth->dim_lock);
3898 
3899 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3900 	val &= MTK_PDMA_DELAY_RX_MASK;
3901 	val |= MTK_PDMA_DELAY_TX_EN;
3902 
3903 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3904 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3905 
3906 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3907 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3908 
3909 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3910 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3911 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3912 
3913 	spin_unlock_bh(&eth->dim_lock);
3914 
3915 	dim->state = DIM_START_MEASURE;
3916 }
3917 
3918 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3919 {
3920 	struct mtk_eth *eth = mac->hw;
3921 	u32 mcr_cur, mcr_new;
3922 
3923 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3924 		return;
3925 
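	/* Round the requested maximum frame length up to the nearest length
	 * the MAC_MCR MAX_RX field can encode (1518/1536/1552/2048 bytes).
	 */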
3926 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3927 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3928 
3929 	if (val <= 1518)
3930 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3931 	else if (val <= 1536)
3932 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3933 	else if (val <= 1552)
3934 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3935 	else
3936 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3937 
3938 	if (mcr_new != mcr_cur)
3939 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3940 }
3941 
3942 static void mtk_hw_reset(struct mtk_eth *eth)
3943 {
3944 	u32 val;
3945 
3946 	if (mtk_is_netsys_v2_or_greater(eth))
3947 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3948 
3949 	if (mtk_is_netsys_v3_or_greater(eth)) {
3950 		val = RSTCTRL_PPE0_V3;
3951 
3952 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3953 			val |= RSTCTRL_PPE1_V3;
3954 
3955 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3956 			val |= RSTCTRL_PPE2;
3957 
3958 		val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3959 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3960 		val = RSTCTRL_PPE0_V2;
3961 
3962 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3963 			val |= RSTCTRL_PPE1;
3964 	} else {
3965 		val = RSTCTRL_PPE0;
3966 	}
3967 
3968 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3969 
3970 	if (mtk_is_netsys_v3_or_greater(eth))
3971 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3972 			     0x6f8ff);
3973 	else if (mtk_is_netsys_v2_or_greater(eth))
3974 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3975 			     0x3ffffff);
3976 }
3977 
3978 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3979 {
3980 	u32 val;
3981 
3982 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3983 	return val;
3984 }
3985 
3986 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3987 {
3988 	u32 rst_mask, val;
3989 
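	/* Warm reset sequence: first assert RSTCTRL_FE alone and wait for the
	 * hardware to acknowledge it, then assert the remaining per-block
	 * reset bits, and finally deassert everything again, verifying each
	 * stage by reading back ETHSYS_RSTCTRL.
	 */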
3990 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3991 			   RSTCTRL_FE);
3992 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3993 				      val & RSTCTRL_FE, 1, 1000)) {
3994 		dev_err(eth->dev, "warm reset failed\n");
3995 		mtk_hw_reset(eth);
3996 		return;
3997 	}
3998 
3999 	if (mtk_is_netsys_v3_or_greater(eth)) {
4000 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
4001 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4002 			rst_mask |= RSTCTRL_PPE1_V3;
4003 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4004 			rst_mask |= RSTCTRL_PPE2;
4005 
4006 		rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
4007 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
4008 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
4009 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4010 			rst_mask |= RSTCTRL_PPE1;
4011 	} else {
4012 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
4013 	}
4014 
4015 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
4016 
4017 	udelay(1);
4018 	val = mtk_hw_reset_read(eth);
4019 	if (!(val & rst_mask))
4020 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
4021 			val, rst_mask);
4022 
4023 	rst_mask |= RSTCTRL_FE;
4024 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
4025 
4026 	udelay(1);
4027 	val = mtk_hw_reset_read(eth);
4028 	if (val & rst_mask)
4029 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
4030 			val, rst_mask);
4031 }
4032 
4033 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
4034 {
4035 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
4036 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
4037 	bool oq_hang, cdm1_busy, adma_busy;
4038 	bool wtx_busy, cdm_full, oq_free;
4039 	u32 wdidx, val, gdm1_fc, gdm2_fc;
4040 	bool qfsm_hang, qfwd_hang;
4041 	bool ret = false;
4042 
4043 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4044 		return false;
4045 
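	/* Heuristic hang detection: a subsystem is only reported as stuck
	 * after the same symptom has been seen on three consecutive monitor
	 * passes, to avoid resetting on transient congestion.
	 */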
4046 	/* WDMA sanity checks */
4047 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
4048 
4049 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
4050 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
4051 
4052 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
4053 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
4054 
4055 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
4056 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
4057 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
4058 
4059 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
4060 		if (++eth->reset.wdma_hang_count > 2) {
4061 			eth->reset.wdma_hang_count = 0;
4062 			ret = true;
4063 		}
4064 		goto out;
4065 	}
4066 
4067 	/* QDMA sanity checks */
4068 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
4069 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
4070 
4071 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
4072 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
4073 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
4074 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
4075 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
4076 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
4077 
4078 	if (qfsm_hang && qfwd_hang &&
4079 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
4080 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
4081 		if (++eth->reset.qdma_hang_count > 2) {
4082 			eth->reset.qdma_hang_count = 0;
4083 			ret = true;
4084 		}
4085 		goto out;
4086 	}
4087 
4088 	/* ADMA sanity checks */
4089 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
4090 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
4091 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
4092 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
4093 
4094 	if (oq_hang && cdm1_busy && adma_busy) {
4095 		if (++eth->reset.adma_hang_count > 2) {
4096 			eth->reset.adma_hang_count = 0;
4097 			ret = true;
4098 		}
4099 		goto out;
4100 	}
4101 
4102 	eth->reset.wdma_hang_count = 0;
4103 	eth->reset.qdma_hang_count = 0;
4104 	eth->reset.adma_hang_count = 0;
4105 out:
4106 	eth->reset.wdidx = wdidx;
4107 
4108 	return ret;
4109 }
4110 
4111 static void mtk_hw_reset_monitor_work(struct work_struct *work)
4112 {
4113 	struct delayed_work *del_work = to_delayed_work(work);
4114 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
4115 					   reset.monitor_work);
4116 
4117 	if (test_bit(MTK_RESETTING, &eth->state))
4118 		goto out;
4119 
4120 	/* DMA stuck checks */
4121 	if (mtk_hw_check_dma_hang(eth))
4122 		schedule_work(&eth->pending_work);
4123 
4124 out:
4125 	schedule_delayed_work(&eth->reset.monitor_work,
4126 			      MTK_DMA_MONITOR_TIMEOUT);
4127 }
4128 
4129 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
4130 {
4131 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
4132 		       ETHSYS_DMA_AG_MAP_PPE;
4133 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
4134 	int i, val, ret;
4135 
4136 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
4137 		return 0;
4138 
4139 	if (!reset) {
4140 		pm_runtime_enable(eth->dev);
4141 		pm_runtime_get_sync(eth->dev);
4142 
4143 		ret = mtk_clk_enable(eth);
4144 		if (ret)
4145 			goto err_disable_pm;
4146 	}
4147 
4148 	if (eth->ethsys)
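	/* Mark PDMA/QDMA/PPE DMA as IO-coherent in ETHSYS when the DMA device
	 * is described as dma-coherent in the device tree (the multiply by
	 * dma_mask turns the bool into an all-or-nothing bitmask).
	 */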
4149 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
4150 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
4151 
4152 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4153 		ret = device_reset(eth->dev);
4154 		if (ret) {
4155 			dev_err(eth->dev, "MAC reset failed!\n");
4156 			goto err_disable_pm;
4157 		}
4158 
4159 		/* set interrupt delays based on current Net DIM sample */
4160 		mtk_dim_rx(&eth->rx_dim.work);
4161 		mtk_dim_tx(&eth->tx_dim.work);
4162 
4163 		/* disable delay and normal interrupt */
4164 		mtk_tx_irq_disable(eth, ~0);
4165 		mtk_rx_irq_disable(eth, ~0);
4166 
4167 		return 0;
4168 	}
4169 
4170 	msleep(100);
4171 
4172 	if (reset)
4173 		mtk_hw_warm_reset(eth);
4174 	else
4175 		mtk_hw_reset(eth);
4176 
4177 	/* No MT7628/88 support yet */
4178 	if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4179 		mtk_mdio_config(eth);
4180 
4181 	if (mtk_is_netsys_v3_or_greater(eth)) {
4182 		/* Set FE to PDMAv2 if necessary */
4183 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
4184 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
4185 	}
4186 
4187 	if (eth->pctl) {
4188 		/* Set GE2 driving and slew rate */
4189 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
4190 
4191 		/* set GE2 TDSEL */
4192 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
4193 
4194 		/* set GE2 TUNE */
4195 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
4196 	}
4197 
4198 	/* Set link down as the default for each GMAC. Each MAC's own MCR is
4199 	 * set up with a more appropriate value when mtk_mac_config() is
4200 	 * invoked.
4201 	 */
4202 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4203 		struct net_device *dev = eth->netdev[i];
4204 
4205 		if (!dev)
4206 			continue;
4207 
4208 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
4209 		mtk_set_mcr_max_rx(netdev_priv(dev),
4210 				   dev->mtu + MTK_RX_ETH_HLEN);
4211 	}
4212 
4213 	/* Tell the CDM to parse the MTK special tag on frames coming from the
4214 	 * CPU; this also works for untagged packets.
4215 	 */
4216 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
4217 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
4218 	if (mtk_is_netsys_v1(eth)) {
4219 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
4220 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
4221 
4222 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
4223 	}
4224 
4225 	/* set interrupt delays based on current Net DIM sample */
4226 	mtk_dim_rx(&eth->rx_dim.work);
4227 	mtk_dim_tx(&eth->tx_dim.work);
4228 
4229 	/* disable delay and normal interrupt */
4230 	mtk_tx_irq_disable(eth, ~0);
4231 	mtk_rx_irq_disable(eth, ~0);
4232 
4233 	/* FE int grouping */
4234 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
4235 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
4236 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
4237 	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
4238 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
4239 
4240 	if (mtk_is_netsys_v3_or_greater(eth)) {
4241 		/* PSE dummy page mechanism */
4242 		mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
4243 			PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
4244 
4245 		/* PSE free buffer drop threshold */
4246 		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
4247 
4248 		/* PSE should not drop port8, port9 and port13 packets from
4249 		 * WDMA Tx
4250 		 */
4251 		mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
4252 
4253 		/* PSE should drop packets to port8, port9 and port13 on WDMA Rx
4254 		 * ring full
4255 		 */
4256 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
4257 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
4258 		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
4259 
4260 		/* GDM and CDM Threshold */
4261 		mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
4262 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
4263 
4264 		/* Disable GDM1 RX CRC stripping */
4265 		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
4266 
4267 		/* The PSE GDM3 MIB counters have incorrect hardware default
4268 		 * values, so read-clear them here to keep ethtool from
4269 		 * reporting wrong MIB values.
4270 		 */
4271 		for (i = 0; i < 0x80; i += 0x4)
4272 			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
4273 	} else if (!mtk_is_netsys_v1(eth)) {
4274 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
4275 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
4276 
4277 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
4278 		mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
4279 
4280 		/* PSE Free Queue Flow Control  */
4281 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
4282 
4283 		/* PSE config input queue threshold */
4284 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
4285 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
4286 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
4287 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
4288 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
4289 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
4290 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4291 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4292 
4293 		/* PSE config output queue threshold */
4294 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4295 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4296 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4297 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4298 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4299 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4300 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4301 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4302 
4303 		/* GDM and CDM Threshold */
4304 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4305 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4306 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4307 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4308 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4309 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4310 	}
4311 
4312 	return 0;
4313 
4314 err_disable_pm:
4315 	if (!reset) {
4316 		pm_runtime_put_sync(eth->dev);
4317 		pm_runtime_disable(eth->dev);
4318 	}
4319 
4320 	return ret;
4321 }
4322 
4323 static int mtk_hw_deinit(struct mtk_eth *eth)
4324 {
4325 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4326 		return 0;
4327 
4328 	mtk_clk_disable(eth);
4329 
4330 	pm_runtime_put_sync(eth->dev);
4331 	pm_runtime_disable(eth->dev);
4332 
4333 	return 0;
4334 }
4335 
4336 static void mtk_uninit(struct net_device *dev)
4337 {
4338 	struct mtk_mac *mac = netdev_priv(dev);
4339 	struct mtk_eth *eth = mac->hw;
4340 
4341 	phylink_disconnect_phy(mac->phylink);
4342 	mtk_tx_irq_disable(eth, ~0);
4343 	mtk_rx_irq_disable(eth, ~0);
4344 }
4345 
4346 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4347 {
4348 	int length = new_mtu + MTK_RX_ETH_HLEN;
4349 	struct mtk_mac *mac = netdev_priv(dev);
4350 	struct mtk_eth *eth = mac->hw;
4351 	int max_mtu, i;
4352 
4353 	if (rcu_access_pointer(eth->prog) &&
4354 	    length > MTK_PP_MAX_BUF_SIZE) {
4355 		netdev_err(dev, "Invalid MTU for XDP mode\n");
4356 		return -EINVAL;
4357 	}
4358 
4359 	mtk_set_mcr_max_rx(mac, length);
4360 	WRITE_ONCE(dev->mtu, new_mtu);
4361 
4362 	max_mtu = mtk_max_gmac_mtu(eth);
4363 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4364 		mtk_ppe_update_mtu(eth->ppe[i], max_mtu);
4365 
4366 	return 0;
4367 }
4368 
4369 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4370 {
4371 	struct mtk_mac *mac = netdev_priv(dev);
4372 
4373 	switch (cmd) {
4374 	case SIOCGMIIPHY:
4375 	case SIOCGMIIREG:
4376 	case SIOCSMIIREG:
4377 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4378 	default:
4379 		break;
4380 	}
4381 
4382 	return -EOPNOTSUPP;
4383 }
4384 
4385 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4386 {
4387 	u32 val;
4388 	int i;
4389 
4390 	/* set FE PPE ports link down */
4391 	for (i = MTK_GMAC1_ID;
4392 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4393 	     i += 2) {
4394 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4395 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4396 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4397 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4398 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4399 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4400 	}
4401 
4402 	/* adjust PPE configurations to prepare for reset */
4403 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4404 		mtk_ppe_prepare_reset(eth->ppe[i]);
4405 
4406 	/* disable NETSYS interrupts */
4407 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4408 
4409 	/* force link down GMAC */
4410 	for (i = 0; i < 2; i++) {
4411 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4412 		mtk_w32(eth, val, MTK_MAC_MCR(i));
4413 	}
4414 }
4415 
4416 static void mtk_pending_work(struct work_struct *work)
4417 {
4418 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4419 	unsigned long restart = 0;
4420 	u32 val;
4421 	int i;
4422 
4423 	rtnl_lock();
4424 	set_bit(MTK_RESETTING, &eth->state);
4425 
4426 	mtk_prepare_for_reset(eth);
4427 	mtk_wed_fe_reset();
4428 	/* Run the preliminary reset configuration again to avoid any possible
4429 	 * race during the FE reset, since it may run with the RTNL lock released.
4430 	 */
4431 	mtk_prepare_for_reset(eth);
4432 
4433 	/* stop all devices to make sure that dma is properly shut down */
4434 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4435 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4436 			continue;
4437 
4438 		mtk_stop(eth->netdev[i]);
4439 		__set_bit(i, &restart);
4440 	}
4441 
4442 	usleep_range(15000, 16000);
4443 
4444 	if (eth->dev->pins)
4445 		pinctrl_select_state(eth->dev->pins->p,
4446 				     eth->dev->pins->default_state);
4447 	mtk_hw_init(eth, true);
4448 
4449 	/* restart DMA and enable IRQs */
4450 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4451 		if (!eth->netdev[i] || !test_bit(i, &restart))
4452 			continue;
4453 
4454 		if (mtk_open(eth->netdev[i])) {
4455 			netif_alert(eth, ifup, eth->netdev[i],
4456 				    "Driver up/down cycle failed\n");
4457 			dev_close(eth->netdev[i]);
4458 		}
4459 	}
4460 
4461 	/* set FE PPE ports link up */
4462 	for (i = MTK_GMAC1_ID;
4463 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4464 	     i += 2) {
4465 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4466 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4467 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4468 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4469 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4470 
4471 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4472 	}
4473 
4474 	clear_bit(MTK_RESETTING, &eth->state);
4475 
4476 	mtk_wed_fe_reset_complete();
4477 
4478 	rtnl_unlock();
4479 }
4480 
4481 static int mtk_free_dev(struct mtk_eth *eth)
4482 {
4483 	int i;
4484 
4485 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4486 		if (!eth->netdev[i])
4487 			continue;
4488 		free_netdev(eth->netdev[i]);
4489 	}
4490 
4491 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4492 		if (!eth->dsa_meta[i])
4493 			break;
4494 		metadata_dst_free(eth->dsa_meta[i]);
4495 	}
4496 
4497 	return 0;
4498 }
4499 
4500 static int mtk_unreg_dev(struct mtk_eth *eth)
4501 {
4502 	int i;
4503 
4504 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4505 		struct mtk_mac *mac;
4506 		if (!eth->netdev[i])
4507 			continue;
4508 		mac = netdev_priv(eth->netdev[i]);
4509 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4510 			unregister_netdevice_notifier(&mac->device_notifier);
4511 		unregister_netdev(eth->netdev[i]);
4512 	}
4513 
4514 	return 0;
4515 }
4516 
4517 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4518 {
4519 	int i;
4520 
4521 	for (i = 0; i < MTK_MAX_DEVS; i++)
4522 		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4523 }
4524 
4525 static int mtk_cleanup(struct mtk_eth *eth)
4526 {
4527 	mtk_sgmii_destroy(eth);
4528 	mtk_unreg_dev(eth);
4529 	mtk_free_dev(eth);
4530 	cancel_work_sync(&eth->pending_work);
4531 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4532 
4533 	return 0;
4534 }
4535 
4536 static int mtk_get_link_ksettings(struct net_device *ndev,
4537 				  struct ethtool_link_ksettings *cmd)
4538 {
4539 	struct mtk_mac *mac = netdev_priv(ndev);
4540 
4541 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4542 		return -EBUSY;
4543 
4544 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4545 }
4546 
4547 static int mtk_set_link_ksettings(struct net_device *ndev,
4548 				  const struct ethtool_link_ksettings *cmd)
4549 {
4550 	struct mtk_mac *mac = netdev_priv(ndev);
4551 
4552 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4553 		return -EBUSY;
4554 
4555 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4556 }
4557 
4558 static void mtk_get_drvinfo(struct net_device *dev,
4559 			    struct ethtool_drvinfo *info)
4560 {
4561 	struct mtk_mac *mac = netdev_priv(dev);
4562 
4563 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4564 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4565 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4566 }
4567 
4568 static u32 mtk_get_msglevel(struct net_device *dev)
4569 {
4570 	struct mtk_mac *mac = netdev_priv(dev);
4571 
4572 	return mac->hw->msg_enable;
4573 }
4574 
4575 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4576 {
4577 	struct mtk_mac *mac = netdev_priv(dev);
4578 
4579 	mac->hw->msg_enable = value;
4580 }
4581 
4582 static int mtk_nway_reset(struct net_device *dev)
4583 {
4584 	struct mtk_mac *mac = netdev_priv(dev);
4585 
4586 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4587 		return -EBUSY;
4588 
4589 	if (!mac->phylink)
4590 		return -ENOTSUPP;
4591 
4592 	return phylink_ethtool_nway_reset(mac->phylink);
4593 }
4594 
4595 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4596 {
4597 	int i;
4598 
4599 	switch (stringset) {
4600 	case ETH_SS_STATS: {
4601 		struct mtk_mac *mac = netdev_priv(dev);
4602 
4603 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4604 			ethtool_puts(&data, mtk_ethtool_stats[i].str);
4605 		if (mtk_page_pool_enabled(mac->hw))
4606 			page_pool_ethtool_stats_get_strings(data);
4607 		break;
4608 	}
4609 	default:
4610 		break;
4611 	}
4612 }
4613 
4614 static int mtk_get_sset_count(struct net_device *dev, int sset)
4615 {
4616 	switch (sset) {
4617 	case ETH_SS_STATS: {
4618 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4619 		struct mtk_mac *mac = netdev_priv(dev);
4620 
4621 		if (mtk_page_pool_enabled(mac->hw))
4622 			count += page_pool_ethtool_stats_get_count();
4623 		return count;
4624 	}
4625 	default:
4626 		return -EOPNOTSUPP;
4627 	}
4628 }
4629 
4630 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4631 {
4632 	struct page_pool_stats stats = {};
4633 	int i;
4634 
4635 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4636 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4637 
4638 		if (!ring->page_pool)
4639 			continue;
4640 
4641 		page_pool_get_stats(ring->page_pool, &stats);
4642 	}
4643 	page_pool_ethtool_stats_get(data, &stats);
4644 }
4645 
4646 static void mtk_get_ethtool_stats(struct net_device *dev,
4647 				  struct ethtool_stats *stats, u64 *data)
4648 {
4649 	struct mtk_mac *mac = netdev_priv(dev);
4650 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4651 	u64 *data_src, *data_dst;
4652 	unsigned int start;
4653 	int i;
4654 
4655 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4656 		return;
4657 
4658 	if (netif_running(dev) && netif_device_present(dev)) {
4659 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4660 			mtk_stats_update_mac(mac);
4661 			spin_unlock_bh(&hwstats->stats_lock);
4662 		}
4663 	}
4664 
4665 	data_src = (u64 *)hwstats;
4666 
4667 	do {
4668 		data_dst = data;
4669 		start = u64_stats_fetch_begin(&hwstats->syncp);
4670 
4671 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4672 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4673 		if (mtk_page_pool_enabled(mac->hw))
4674 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4675 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4676 }
4677 
4678 static u32 mtk_get_rx_ring_count(struct net_device *dev)
4679 {
4680 	if (dev->hw_features & NETIF_F_LRO)
4681 		return MTK_MAX_RX_RING_NUM;
4682 
4683 	return 0;
4684 }
4685 
4686 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4687 			 u32 *rule_locs)
4688 {
4689 	int ret = -EOPNOTSUPP;
4690 
4691 	switch (cmd->cmd) {
4692 	case ETHTOOL_GRXCLSRLCNT:
4693 		if (dev->hw_features & NETIF_F_LRO) {
4694 			struct mtk_mac *mac = netdev_priv(dev);
4695 
4696 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4697 			ret = 0;
4698 		}
4699 		break;
4700 	case ETHTOOL_GRXCLSRULE:
4701 		if (dev->hw_features & NETIF_F_LRO)
4702 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4703 		break;
4704 	case ETHTOOL_GRXCLSRLALL:
4705 		if (dev->hw_features & NETIF_F_LRO)
4706 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4707 						     rule_locs);
4708 		break;
4709 	default:
4710 		break;
4711 	}
4712 
4713 	return ret;
4714 }
4715 
4716 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4717 {
4718 	int ret = -EOPNOTSUPP;
4719 
4720 	switch (cmd->cmd) {
4721 	case ETHTOOL_SRXCLSRLINS:
4722 		if (dev->hw_features & NETIF_F_LRO)
4723 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4724 		break;
4725 	case ETHTOOL_SRXCLSRLDEL:
4726 		if (dev->hw_features & NETIF_F_LRO)
4727 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4728 		break;
4729 	default:
4730 		break;
4731 	}
4732 
4733 	return ret;
4734 }
4735 
4736 static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4737 {
4738 	struct mtk_mac *mac = netdev_priv(dev);
4739 
4740 	phylink_ethtool_get_pauseparam(mac->phylink, pause);
4741 }
4742 
4743 static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4744 {
4745 	struct mtk_mac *mac = netdev_priv(dev);
4746 
4747 	return phylink_ethtool_set_pauseparam(mac->phylink, pause);
4748 }
4749 
4750 static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
4751 {
4752 	struct mtk_mac *mac = netdev_priv(dev);
4753 
4754 	return phylink_ethtool_get_eee(mac->phylink, eee);
4755 }
4756 
4757 static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
4758 {
4759 	struct mtk_mac *mac = netdev_priv(dev);
4760 
4761 	return phylink_ethtool_set_eee(mac->phylink, eee);
4762 }
4763 
4764 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4765 			    struct net_device *sb_dev)
4766 {
4767 	struct mtk_mac *mac = netdev_priv(dev);
4768 	unsigned int queue = 0;
4769 
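	/* Frames from DSA user ports keep their per-port queue mapping,
	 * offset by 3 past the queues used directly by the GMACs; everything
	 * else is sent on the queue matching the transmitting MAC.
	 */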
4770 	if (netdev_uses_dsa(dev))
4771 		queue = skb_get_queue_mapping(skb) + 3;
4772 	else
4773 		queue = mac->id;
4774 
4775 	if (queue >= dev->num_tx_queues)
4776 		queue = 0;
4777 
4778 	return queue;
4779 }
4780 
4781 static const struct ethtool_ops mtk_ethtool_ops = {
4782 	.get_link_ksettings	= mtk_get_link_ksettings,
4783 	.set_link_ksettings	= mtk_set_link_ksettings,
4784 	.get_drvinfo		= mtk_get_drvinfo,
4785 	.get_msglevel		= mtk_get_msglevel,
4786 	.set_msglevel		= mtk_set_msglevel,
4787 	.nway_reset		= mtk_nway_reset,
4788 	.get_link		= ethtool_op_get_link,
4789 	.get_strings		= mtk_get_strings,
4790 	.get_sset_count		= mtk_get_sset_count,
4791 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4792 	.get_pauseparam		= mtk_get_pauseparam,
4793 	.set_pauseparam		= mtk_set_pauseparam,
4794 	.get_rxnfc		= mtk_get_rxnfc,
4795 	.set_rxnfc		= mtk_set_rxnfc,
4796 	.get_rx_ring_count	= mtk_get_rx_ring_count,
4797 	.get_eee		= mtk_get_eee,
4798 	.set_eee		= mtk_set_eee,
4799 };
4800 
4801 static const struct net_device_ops mtk_netdev_ops = {
4802 	.ndo_uninit		= mtk_uninit,
4803 	.ndo_open		= mtk_open,
4804 	.ndo_stop		= mtk_stop,
4805 	.ndo_start_xmit		= mtk_start_xmit,
4806 	.ndo_set_mac_address	= mtk_set_mac_address,
4807 	.ndo_validate_addr	= eth_validate_addr,
4808 	.ndo_eth_ioctl		= mtk_do_ioctl,
4809 	.ndo_change_mtu		= mtk_change_mtu,
4810 	.ndo_tx_timeout		= mtk_tx_timeout,
4811 	.ndo_get_stats64        = mtk_get_stats64,
4812 	.ndo_fix_features	= mtk_fix_features,
4813 	.ndo_set_features	= mtk_set_features,
4814 #ifdef CONFIG_NET_POLL_CONTROLLER
4815 	.ndo_poll_controller	= mtk_poll_controller,
4816 #endif
4817 	.ndo_setup_tc		= mtk_eth_setup_tc,
4818 	.ndo_bpf		= mtk_xdp,
4819 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4820 	.ndo_select_queue	= mtk_select_queue,
4821 };
4822 
4823 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4824 {
4825 	const struct phylink_mac_ops *mac_ops = &mtk_phylink_ops;
4826 	const __be32 *_id = of_get_property(np, "reg", NULL);
4827 	phy_interface_t phy_mode;
4828 	struct phylink *phylink;
4829 	struct mtk_mac *mac;
4830 	int id, err;
4831 	int txqs = 1;
4832 	u32 val;
4833 
4834 	if (!_id) {
4835 		dev_err(eth->dev, "missing mac id\n");
4836 		return -EINVAL;
4837 	}
4838 
4839 	id = be32_to_cpup(_id);
4840 	if (id >= MTK_MAX_DEVS) {
4841 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4842 		return -EINVAL;
4843 	}
4844 
4845 	if (eth->netdev[id]) {
4846 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4847 		return -EINVAL;
4848 	}
4849 
4850 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4851 		txqs = MTK_QDMA_NUM_QUEUES;
4852 
4853 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4854 	if (!eth->netdev[id]) {
4855 		dev_err(eth->dev, "alloc_etherdev failed\n");
4856 		return -ENOMEM;
4857 	}
4858 	mac = netdev_priv(eth->netdev[id]);
4859 	eth->mac[id] = mac;
4860 	mac->id = id;
4861 	mac->hw = eth;
4862 	mac->of_node = np;
4863 
4864 	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4865 	if (err == -EPROBE_DEFER)
4866 		return err;
4867 
4868 	if (err) {
4869 		/* If the mac address is invalid, use random mac address */
4870 		eth_hw_addr_random(eth->netdev[id]);
4871 		dev_err(eth->dev, "generated random MAC address %pM\n",
4872 			eth->netdev[id]->dev_addr);
4873 	}
4874 
4875 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4876 	mac->hwlro_ip_cnt = 0;
4877 
4878 	mac->hw_stats = devm_kzalloc(eth->dev,
4879 				     sizeof(*mac->hw_stats),
4880 				     GFP_KERNEL);
4881 	if (!mac->hw_stats) {
4882 		dev_err(eth->dev, "failed to allocate counter memory\n");
4883 		err = -ENOMEM;
4884 		goto free_netdev;
4885 	}
4886 	spin_lock_init(&mac->hw_stats->stats_lock);
4887 	u64_stats_init(&mac->hw_stats->syncp);
4888 
4889 	if (mtk_is_netsys_v3_or_greater(eth))
4890 		mac->hw_stats->reg_offset = id * 0x80;
4891 	else
4892 		mac->hw_stats->reg_offset = id * 0x40;
4893 
4894 	/* phylink create */
4895 	err = of_get_phy_mode(np, &phy_mode);
4896 	if (err) {
4897 		dev_err(eth->dev, "incorrect phy-mode\n");
4898 		goto free_netdev;
4899 	}
4900 
4901 	/* mac config is not set */
4902 	mac->interface = PHY_INTERFACE_MODE_NA;
4903 	mac->speed = SPEED_UNKNOWN;
4904 
4905 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4906 	mac->phylink_config.type = PHYLINK_NETDEV;
4907 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4908 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4909 	mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
4910 		MAC_2500FD;
4911 	mac->phylink_config.lpi_timer_default = 1000;
4912 
4913 	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
4914 	 * in its .mac_config method (since state->speed is not valid there).
4915 	 * Disable support for MII, GMII and RGMII.
4916 	 */
4917 	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4918 		__set_bit(PHY_INTERFACE_MODE_MII,
4919 			  mac->phylink_config.supported_interfaces);
4920 		__set_bit(PHY_INTERFACE_MODE_GMII,
4921 			  mac->phylink_config.supported_interfaces);
4922 
4923 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4924 			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4925 	}
4926 
4927 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4928 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4929 			  mac->phylink_config.supported_interfaces);
4930 
4931 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4932 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4933 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4934 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4935 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4936 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4937 				    mac->phylink_config.supported_interfaces);
4938 	}
4939 
4940 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4941 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4942 			  mac->phylink_config.supported_interfaces);
4943 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4944 			  mac->phylink_config.supported_interfaces);
4945 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4946 			  mac->phylink_config.supported_interfaces);
4947 	}
4948 
4949 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4950 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
4951 	    id == MTK_GMAC1_ID) {
4952 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4953 						       MAC_SYM_PAUSE |
4954 						       MAC_10000FD;
4955 		phy_interface_zero(mac->phylink_config.supported_interfaces);
4956 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4957 			  mac->phylink_config.supported_interfaces);
4958 	}
4959 
4960 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4961 		mac_ops = &rt5350_phylink_ops;
4962 
4963 	phylink = phylink_create(&mac->phylink_config,
4964 				 of_fwnode_handle(mac->of_node),
4965 				 phy_mode, mac_ops);
4966 	if (IS_ERR(phylink)) {
4967 		err = PTR_ERR(phylink);
4968 		goto free_netdev;
4969 	}
4970 
4971 	mac->phylink = phylink;
4972 
4973 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
4974 	    id == MTK_GMAC2_ID)
4975 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4976 			  mac->phylink_config.supported_interfaces);
4977 
4978 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4979 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4980 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4981 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4982 
4983 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4984 	if (eth->hwlro)
4985 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4986 
4987 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4988 		~NETIF_F_HW_VLAN_CTAG_TX;
4989 	eth->netdev[id]->features |= eth->soc->hw_features;
4990 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4991 
4992 	eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
4993 	eth->netdev[id]->dev.of_node = np;
4994 
4995 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4996 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4997 	else
4998 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4999 
5000 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
5001 		mac->device_notifier.notifier_call = mtk_device_event;
5002 		register_netdevice_notifier(&mac->device_notifier);
5003 	}
5004 
5005 	if (mtk_page_pool_enabled(eth))
5006 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
5007 						NETDEV_XDP_ACT_REDIRECT |
5008 						NETDEV_XDP_ACT_NDO_XMIT |
5009 						NETDEV_XDP_ACT_NDO_XMIT_SG;
5010 
5011 	return 0;
5012 
5013 free_netdev:
5014 	free_netdev(eth->netdev[id]);
5015 	return err;
5016 }
5017 
5018 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
5019 {
5020 	struct net_device *dev, *tmp;
5021 	LIST_HEAD(dev_list);
5022 	int i;
5023 
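	/* Changing the DMA device (e.g. when WED takes over the rings)
	 * requires re-allocating all descriptors, so close every running
	 * netdev, swap the device and reopen them, all under RTNL.
	 */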
5024 	rtnl_lock();
5025 
5026 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5027 		dev = eth->netdev[i];
5028 
5029 		if (!dev || !(dev->flags & IFF_UP))
5030 			continue;
5031 
5032 		list_add_tail(&dev->close_list, &dev_list);
5033 	}
5034 
5035 	netif_close_many(&dev_list, false);
5036 
5037 	eth->dma_dev = dma_dev;
5038 
5039 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
5040 		list_del_init(&dev->close_list);
5041 		dev_open(dev, NULL);
5042 	}
5043 
5044 	rtnl_unlock();
5045 }
5046 
5047 static int mtk_sgmii_init(struct mtk_eth *eth)
5048 {
5049 	struct device_node *np;
5050 	struct regmap *regmap;
5051 	int i;
5052 
5053 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5054 		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
5055 		if (!np)
5056 			break;
5057 
5058 		regmap = syscon_node_to_regmap(np);
5059 		if (IS_ERR(regmap)) {
5060 			of_node_put(np);
5061 			return PTR_ERR(regmap);
5062 		}
5063 
5064 		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev,
5065 							 of_fwnode_handle(np),
5066 							 regmap,
5067 							 eth->soc->ana_rgc3);
5068 		of_node_put(np);
5069 	}
5070 
5071 	return 0;
5072 }
5073 
5074 static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
5075 {
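	/* Older device trees do not describe the SRAM as a separate node, so
	 * carve a gen_pool out of the fixed window at MTK_ETH_SRAM_OFFSET
	 * within the frame engine's own MMIO resource.
	 */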
5076 	dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
5077 
5078 	if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
5079 	    res->end)
5080 		return -EINVAL;
5081 
5082 	eth->sram_pool = devm_gen_pool_create(eth->dev,
5083 					      const_ilog2(MTK_ETH_SRAM_GRANULARITY),
5084 					      NUMA_NO_NODE, dev_name(eth->dev));
5085 
5086 	if (IS_ERR(eth->sram_pool))
5087 		return PTR_ERR(eth->sram_pool);
5088 
5089 	return gen_pool_add_virt(eth->sram_pool,
5090 				 (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
5091 				 res->start + MTK_ETH_SRAM_OFFSET,
5092 				 MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
5093 }
5094 
5095 static int mtk_probe(struct platform_device *pdev)
5096 {
5097 	struct resource *res = NULL;
5098 	struct device_node *mac_np;
5099 	struct mtk_eth *eth;
5100 	int err, i;
5101 
5102 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
5103 	if (!eth)
5104 		return -ENOMEM;
5105 
5106 	eth->soc = of_device_get_match_data(&pdev->dev);
5107 
5108 	eth->dev = &pdev->dev;
5109 	eth->dma_dev = &pdev->dev;
5110 	eth->base = devm_platform_ioremap_resource(pdev, 0);
5111 	if (IS_ERR(eth->base))
5112 		return PTR_ERR(eth->base);
5113 
5114 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
5115 		eth->ip_align = NET_IP_ALIGN;
5116 
5117 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
5118 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
5119 		if (!err)
5120 			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5121 
5122 		if (err) {
5123 			dev_err(&pdev->dev, "Wrong DMA config\n");
5124 			return -EINVAL;
5125 		}
5126 	}
5127 
5128 	spin_lock_init(&eth->page_lock);
5129 	spin_lock_init(&eth->tx_irq_lock);
5130 	spin_lock_init(&eth->rx_irq_lock);
5131 	spin_lock_init(&eth->dim_lock);
5132 
5133 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5134 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
5135 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
5136 
5137 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5138 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
5139 
5140 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5141 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5142 							      "mediatek,ethsys");
5143 		if (IS_ERR(eth->ethsys)) {
5144 			dev_err(&pdev->dev, "no ethsys regmap found\n");
5145 			return PTR_ERR(eth->ethsys);
5146 		}
5147 	}
5148 
5149 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
5150 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5151 							     "mediatek,infracfg");
5152 		if (IS_ERR(eth->infra)) {
5153 			dev_err(&pdev->dev, "no infracfg regmap found\n");
5154 			return PTR_ERR(eth->infra);
5155 		}
5156 	}
5157 
5158 	if (of_dma_is_coherent(pdev->dev.of_node)) {
5159 		struct regmap *cci;
5160 
5161 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5162 						      "cci-control-port");
5163 		/* enable CPU/bus coherency */
5164 		if (!IS_ERR(cci))
5165 			regmap_write(cci, 0, 3);
5166 	}
5167 
5168 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
5169 		err = mtk_sgmii_init(eth);
5170 
5171 		if (err)
5172 			return err;
5173 	}
5174 
5175 	if (eth->soc->required_pctl) {
5176 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5177 							    "mediatek,pctl");
5178 		if (IS_ERR(eth->pctl)) {
5179 			dev_err(&pdev->dev, "no pctl regmap found\n");
5180 			err = PTR_ERR(eth->pctl);
5181 			goto err_destroy_sgmii;
5182 		}
5183 	}
5184 
5185 	if (mtk_is_netsys_v2_or_greater(eth)) {
5186 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5187 		if (!res) {
5188 			err = -EINVAL;
5189 			goto err_destroy_sgmii;
5190 		}
5191 
5192 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
5193 			eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
5194 							 "sram", 0);
5195 			if (!eth->sram_pool) {
5196 				if (!mtk_is_netsys_v3_or_greater(eth)) {
5197 					err = mtk_setup_legacy_sram(eth, res);
5198 					if (err)
5199 						goto err_destroy_sgmii;
5200 				} else {
5201 					dev_err(&pdev->dev,
5202 						"Could not get SRAM pool\n");
5203 					err = -EINVAL;
5204 					goto err_destroy_sgmii;
5205 				}
5206 			}
5207 		}
5208 	}
5209 
5210 	if (eth->soc->offload_version) {
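	/* Register any Wireless Ethernet Dispatch (WED) instances referenced
	 * by "mediatek,wed" phandles, handing each one its WDMA register
	 * window and physical base so WLAN offload can attach later.
	 */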
5211 		for (i = 0;; i++) {
5212 			struct device_node *np;
5213 			phys_addr_t wdma_phy;
5214 			u32 wdma_base;
5215 
5216 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
5217 				break;
5218 
5219 			np = of_parse_phandle(pdev->dev.of_node,
5220 					      "mediatek,wed", i);
5221 			if (!np)
5222 				break;
5223 
5224 			wdma_base = eth->soc->reg_map->wdma_base[i];
5225 			wdma_phy = res ? res->start + wdma_base : 0;
5226 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
5227 				       wdma_phy, i);
5228 		}
5229 	}
5230 
5231 	err = mtk_get_irqs(pdev, eth);
5232 	if (err)
5233 		goto err_wed_exit;
5234 
5235 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
5236 		eth->clks[i] = devm_clk_get(eth->dev,
5237 					    mtk_clks_source_name[i]);
5238 		if (IS_ERR(eth->clks[i])) {
5239 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
5240 				err = -EPROBE_DEFER;
5241 				goto err_wed_exit;
5242 			}
5243 			if (eth->soc->required_clks & BIT(i)) {
5244 				dev_err(&pdev->dev, "clock %s not found\n",
5245 					mtk_clks_source_name[i]);
5246 				err = -EINVAL;
5247 				goto err_wed_exit;
5248 			}
5249 			eth->clks[i] = NULL;
5250 		}
5251 	}
5252 
5253 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
5254 	INIT_WORK(&eth->pending_work, mtk_pending_work);
5255 
5256 	err = mtk_hw_init(eth, false);
5257 	if (err)
5258 		goto err_wed_exit;
5259 
5260 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
5261 
5262 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
5263 		if (!of_device_is_compatible(mac_np,
5264 					     "mediatek,eth-mac"))
5265 			continue;
5266 
5267 		if (!of_device_is_available(mac_np))
5268 			continue;
5269 
5270 		err = mtk_add_mac(eth, mac_np);
5271 		if (err) {
5272 			of_node_put(mac_np);
5273 			goto err_deinit_hw;
5274 		}
5275 	}
5276 
5277 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
5278 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
5279 				       mtk_handle_irq, 0,
5280 				       dev_name(eth->dev), eth);
5281 	} else {
5282 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
5283 				       mtk_handle_irq_tx, 0,
5284 				       dev_name(eth->dev), eth);
5285 		if (err)
5286 			goto err_free_dev;
5287 
5288 		err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
5289 				       mtk_handle_irq_rx, 0,
5290 				       dev_name(eth->dev), eth);
5291 	}
5292 	if (err)
5293 		goto err_free_dev;
5294 
5295 	/* No MT7628/88 support yet */
5296 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
5297 		err = mtk_mdio_init(eth);
5298 		if (err)
5299 			goto err_free_dev;
5300 	}
5301 
5302 	if (eth->soc->offload_version) {
5303 		u8 ppe_num = eth->soc->ppe_num;
5304 
5305 		ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
5306 		for (i = 0; i < ppe_num; i++) {
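			/* PPE instances 0 and 1 are spaced 0x400 apart from
			 * ppe_base; a third instance sits further out at
			 * offset 0xc00.
			 */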
5307 			u32 ppe_addr = eth->soc->reg_map->ppe_base;
5308 
5309 			ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
5310 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
5311 
5312 			if (!eth->ppe[i]) {
5313 				err = -ENOMEM;
5314 				goto err_deinit_ppe;
5315 			}
5316 			err = mtk_eth_offload_init(eth, i);
5317 
5318 			if (err)
5319 				goto err_deinit_ppe;
5320 		}
5321 	}
5322 
5323 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5324 		if (!eth->netdev[i])
5325 			continue;
5326 
5327 		err = register_netdev(eth->netdev[i]);
5328 		if (err) {
5329 			dev_err(eth->dev, "error bringing up device\n");
5330 			goto err_deinit_ppe;
5331 		} else
5332 			netif_info(eth, probe, eth->netdev[i],
5333 				   "mediatek frame engine at 0x%08lx, irq %d\n",
5334 				   eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
5335 	}
5336 
5337 	/* We run two devices on the same DMA ring, so we need a dummy device
5338 	 * for NAPI to work.
5339 	 */
5340 	eth->dummy_dev = alloc_netdev_dummy(0);
5341 	if (!eth->dummy_dev) {
5342 		err = -ENOMEM;
5343 		dev_err(eth->dev, "failed to allocate dummy device\n");
5344 		goto err_unreg_netdev;
5345 	}
5346 	netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5347 	netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5348 
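	/* Publish the driver data and arm the periodic DMA monitor that feeds
	 * the reset machinery.
	 */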
5349 	platform_set_drvdata(pdev, eth);
5350 	schedule_delayed_work(&eth->reset.monitor_work,
5351 			      MTK_DMA_MONITOR_TIMEOUT);
5352 
5353 	return 0;
5354 
5355 err_unreg_netdev:
5356 	mtk_unreg_dev(eth);
5357 err_deinit_ppe:
5358 	mtk_ppe_deinit(eth);
5359 	mtk_mdio_cleanup(eth);
5360 err_free_dev:
5361 	mtk_free_dev(eth);
5362 err_deinit_hw:
5363 	mtk_hw_deinit(eth);
5364 err_wed_exit:
5365 	mtk_wed_exit();
5366 err_destroy_sgmii:
5367 	mtk_sgmii_destroy(eth);
5368 
5369 	return err;
5370 }
5371 
5372 static void mtk_remove(struct platform_device *pdev)
5373 {
5374 	struct mtk_eth *eth = platform_get_drvdata(pdev);
5375 	struct mtk_mac *mac;
5376 	int i;
5377 
5378 	/* stop all devices to make sure that dma is properly shut down */
5379 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5380 		if (!eth->netdev[i])
5381 			continue;
5382 		mtk_stop(eth->netdev[i]);
5383 		mac = netdev_priv(eth->netdev[i]);
5384 		phylink_disconnect_phy(mac->phylink);
5385 	}
5386 
5387 	mtk_wed_exit();
5388 	mtk_hw_deinit(eth);
5389 
5390 	netif_napi_del(&eth->tx_napi);
5391 	netif_napi_del(&eth->rx_napi);
5392 	mtk_cleanup(eth);
5393 	free_netdev(eth->dummy_dev);
5394 	mtk_mdio_cleanup(eth);
5395 }
5396 
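/* Per-SoC configuration: register map, capability flags, required clocks,
 * DMA descriptor geometry and (where supported) PPE/flow-offload parameters.
 * The entries below are matched against the DT compatible strings in
 * of_mtk_match[].
 */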
5397 static const struct mtk_soc_data mt2701_data = {
5398 	.reg_map = &mtk_reg_map,
5399 	.caps = MT7623_CAPS | MTK_HWLRO,
5400 	.hw_features = MTK_HW_FEATURES,
5401 	.required_clks = MT7623_CLKS_BITMAP,
5402 	.required_pctl = true,
5403 	.version = 1,
5404 	.tx = {
5405 		.desc_size = sizeof(struct mtk_tx_dma),
5406 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5407 		.dma_len_offset = 16,
5408 		.dma_size = MTK_DMA_SIZE(2K),
5409 		.fq_dma_size = MTK_DMA_SIZE(2K),
5410 	},
5411 	.rx = {
5412 		.desc_size = sizeof(struct mtk_rx_dma),
5413 		.irq_done_mask = MTK_RX_DONE_INT,
5414 		.dma_l4_valid = RX_DMA_L4_VALID,
5415 		.dma_size = MTK_DMA_SIZE(2K),
5416 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5417 		.dma_len_offset = 16,
5418 	},
5419 };
5420 
5421 static const struct mtk_soc_data mt7621_data = {
5422 	.reg_map = &mtk_reg_map,
5423 	.caps = MT7621_CAPS,
5424 	.hw_features = MTK_HW_FEATURES,
5425 	.required_clks = MT7621_CLKS_BITMAP,
5426 	.required_pctl = false,
5427 	.version = 1,
5428 	.offload_version = 1,
5429 	.ppe_num = 1,
5430 	.hash_offset = 2,
5431 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5432 	.tx = {
5433 		.desc_size = sizeof(struct mtk_tx_dma),
5434 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5435 		.dma_len_offset = 16,
5436 		.dma_size = MTK_DMA_SIZE(2K),
5437 		.fq_dma_size = MTK_DMA_SIZE(2K),
5438 	},
5439 	.rx = {
5440 		.desc_size = sizeof(struct mtk_rx_dma),
5441 		.irq_done_mask = MTK_RX_DONE_INT,
5442 		.dma_l4_valid = RX_DMA_L4_VALID,
5443 		.dma_size = MTK_DMA_SIZE(2K),
5444 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5445 		.dma_len_offset = 16,
5446 	},
5447 };
5448 
5449 static const struct mtk_soc_data mt7622_data = {
5450 	.reg_map = &mtk_reg_map,
5451 	.ana_rgc3 = 0x2028,
5452 	.caps = MT7622_CAPS | MTK_HWLRO,
5453 	.hw_features = MTK_HW_FEATURES,
5454 	.required_clks = MT7622_CLKS_BITMAP,
5455 	.required_pctl = false,
5456 	.version = 1,
5457 	.offload_version = 2,
5458 	.ppe_num = 1,
5459 	.hash_offset = 2,
5460 	.has_accounting = true,
5461 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5462 	.tx = {
5463 		.desc_size = sizeof(struct mtk_tx_dma),
5464 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5465 		.dma_len_offset = 16,
5466 		.dma_size = MTK_DMA_SIZE(2K),
5467 		.fq_dma_size = MTK_DMA_SIZE(2K),
5468 	},
5469 	.rx = {
5470 		.desc_size = sizeof(struct mtk_rx_dma),
5471 		.irq_done_mask = MTK_RX_DONE_INT,
5472 		.dma_l4_valid = RX_DMA_L4_VALID,
5473 		.dma_size = MTK_DMA_SIZE(2K),
5474 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5475 		.dma_len_offset = 16,
5476 	},
5477 };
5478 
5479 static const struct mtk_soc_data mt7623_data = {
5480 	.reg_map = &mtk_reg_map,
5481 	.caps = MT7623_CAPS | MTK_HWLRO,
5482 	.hw_features = MTK_HW_FEATURES,
5483 	.required_clks = MT7623_CLKS_BITMAP,
5484 	.required_pctl = true,
5485 	.version = 1,
5486 	.offload_version = 1,
5487 	.ppe_num = 1,
5488 	.hash_offset = 2,
5489 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5490 	.disable_pll_modes = true,
5491 	.tx = {
5492 		.desc_size = sizeof(struct mtk_tx_dma),
5493 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5494 		.dma_len_offset = 16,
5495 		.dma_size = MTK_DMA_SIZE(2K),
5496 		.fq_dma_size = MTK_DMA_SIZE(2K),
5497 	},
5498 	.rx = {
5499 		.desc_size = sizeof(struct mtk_rx_dma),
5500 		.irq_done_mask = MTK_RX_DONE_INT,
5501 		.dma_l4_valid = RX_DMA_L4_VALID,
5502 		.dma_size = MTK_DMA_SIZE(2K),
5503 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5504 		.dma_len_offset = 16,
5505 	},
5506 };
5507 
5508 static const struct mtk_soc_data mt7629_data = {
5509 	.reg_map = &mtk_reg_map,
5510 	.ana_rgc3 = 0x128,
5511 	.caps = MT7629_CAPS | MTK_HWLRO,
5512 	.hw_features = MTK_HW_FEATURES,
5513 	.required_clks = MT7629_CLKS_BITMAP,
5514 	.required_pctl = false,
5515 	.has_accounting = true,
5516 	.version = 1,
5517 	.tx = {
5518 		.desc_size = sizeof(struct mtk_tx_dma),
5519 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5520 		.dma_len_offset = 16,
5521 		.dma_size = MTK_DMA_SIZE(2K),
5522 		.fq_dma_size = MTK_DMA_SIZE(2K),
5523 	},
5524 	.rx = {
5525 		.desc_size = sizeof(struct mtk_rx_dma),
5526 		.irq_done_mask = MTK_RX_DONE_INT,
5527 		.dma_l4_valid = RX_DMA_L4_VALID,
5528 		.dma_size = MTK_DMA_SIZE(2K),
5529 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5530 		.dma_len_offset = 16,
5531 	},
5532 };
5533 
5534 static const struct mtk_soc_data mt7981_data = {
5535 	.reg_map = &mt7986_reg_map,
5536 	.ana_rgc3 = 0x128,
5537 	.caps = MT7981_CAPS,
5538 	.hw_features = MTK_HW_FEATURES,
5539 	.required_clks = MT7981_CLKS_BITMAP,
5540 	.required_pctl = false,
5541 	.version = 2,
5542 	.offload_version = 2,
5543 	.ppe_num = 2,
5544 	.hash_offset = 4,
5545 	.has_accounting = true,
5546 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5547 	.tx = {
5548 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5549 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5550 		.dma_len_offset = 8,
5551 		.dma_size = MTK_DMA_SIZE(2K),
5552 		.fq_dma_size = MTK_DMA_SIZE(2K),
5553 	},
5554 	.rx = {
5555 		.desc_size = sizeof(struct mtk_rx_dma),
5556 		.irq_done_mask = MTK_RX_DONE_INT,
5557 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5558 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5559 		.dma_len_offset = 16,
5560 		.dma_size = MTK_DMA_SIZE(2K),
5561 	},
5562 };
5563 
5564 static const struct mtk_soc_data mt7986_data = {
5565 	.reg_map = &mt7986_reg_map,
5566 	.ana_rgc3 = 0x128,
5567 	.caps = MT7986_CAPS,
5568 	.hw_features = MTK_HW_FEATURES,
5569 	.required_clks = MT7986_CLKS_BITMAP,
5570 	.required_pctl = false,
5571 	.version = 2,
5572 	.offload_version = 2,
5573 	.ppe_num = 2,
5574 	.hash_offset = 4,
5575 	.has_accounting = true,
5576 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5577 	.tx = {
5578 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5579 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5580 		.dma_len_offset = 8,
5581 		.dma_size = MTK_DMA_SIZE(2K),
5582 		.fq_dma_size = MTK_DMA_SIZE(2K),
5583 	},
5584 	.rx = {
5585 		.desc_size = sizeof(struct mtk_rx_dma),
5586 		.irq_done_mask = MTK_RX_DONE_INT,
5587 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5588 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5589 		.dma_len_offset = 16,
5590 		.dma_size = MTK_DMA_SIZE(2K),
5591 	},
5592 };
5593 
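/* MT7988 is the only version-3 frame engine here: it uses the v2 TX and RX
 * descriptor layout, three PPE instances and a larger free-queue DMA ring.
 */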
5594 static const struct mtk_soc_data mt7988_data = {
5595 	.reg_map = &mt7988_reg_map,
5596 	.ana_rgc3 = 0x128,
5597 	.caps = MT7988_CAPS,
5598 	.hw_features = MTK_HW_FEATURES,
5599 	.required_clks = MT7988_CLKS_BITMAP,
5600 	.required_pctl = false,
5601 	.version = 3,
5602 	.offload_version = 2,
5603 	.ppe_num = 3,
5604 	.hash_offset = 4,
5605 	.has_accounting = true,
5606 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5607 	.tx = {
5608 		.desc_size = sizeof(struct mtk_tx_dma_v2),
5609 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5610 		.dma_len_offset = 8,
5611 		.dma_size = MTK_DMA_SIZE(2K),
5612 		.fq_dma_size = MTK_DMA_SIZE(4K),
5613 	},
5614 	.rx = {
5615 		.desc_size = sizeof(struct mtk_rx_dma_v2),
5616 		.irq_done_mask = MTK_RX_DONE_INT_V2,
5617 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
5618 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5619 		.dma_len_offset = 8,
5620 		.dma_size = MTK_DMA_SIZE(2K),
5621 	},
5622 };
5623 
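/* RT5350/MT7628-class SoCs drive the DMA through PDMA only, so they use the
 * PDMA variant of the L4-valid RX flag and a reduced hardware feature set.
 */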
5624 static const struct mtk_soc_data rt5350_data = {
5625 	.reg_map = &mt7628_reg_map,
5626 	.caps = MT7628_CAPS,
5627 	.hw_features = MTK_HW_FEATURES_MT7628,
5628 	.required_clks = MT7628_CLKS_BITMAP,
5629 	.required_pctl = false,
5630 	.version = 1,
5631 	.tx = {
5632 		.desc_size = sizeof(struct mtk_tx_dma),
5633 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5634 		.dma_len_offset = 16,
5635 		.dma_size = MTK_DMA_SIZE(2K),
5636 	},
5637 	.rx = {
5638 		.desc_size = sizeof(struct mtk_rx_dma),
5639 		.irq_done_mask = MTK_RX_DONE_INT,
5640 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5641 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5642 		.dma_len_offset = 16,
5643 		.dma_size = MTK_DMA_SIZE(2K),
5644 	},
5645 };
5646 
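/* Match table binding the DT compatible strings to the SoC data above. */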
5647 const struct of_device_id of_mtk_match[] = {
5648 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5649 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5650 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5651 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5652 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5653 	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5654 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5655 	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5656 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5657 	{},
5658 };
5659 MODULE_DEVICE_TABLE(of, of_mtk_match);
5660 
5661 static struct platform_driver mtk_driver = {
5662 	.probe = mtk_probe,
5663 	.remove = mtk_remove,
5664 	.driver = {
5665 		.name = "mtk_soc_eth",
5666 		.of_match_table = of_mtk_match,
5667 	},
5668 };
5669 
5670 module_platform_driver(mtk_driver);
5671 
5672 MODULE_LICENSE("GPL");
5673 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5674 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5675 MODULE_IMPORT_NS("NETDEV_INTERNAL");
5676