xref: /linux/drivers/net/dsa/microchip/ksz9477.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microchip KSZ9477 switch driver main logic
4  *
5  * Copyright (C) 2017-2025 Microchip Technology Inc.
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/iopoll.h>
11 #include <linux/platform_data/microchip-ksz.h>
12 #include <linux/phy.h>
13 #include <linux/if_bridge.h>
14 #include <linux/if_vlan.h>
15 #include <net/dsa.h>
16 #include <net/switchdev.h>
17 
18 #include "ksz9477_reg.h"
19 #include "ksz_common.h"
20 #include "ksz9477.h"
21 
/* Set or clear @bits in the 8-bit global switch register @addr. */
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	unsigned int new_val = set ? bits : 0;

	regmap_update_bits(ksz_regmap_8(dev), addr, bits, new_val);
}
26 
/* Set or clear @bits in the 8-bit per-port register @offset of @port. */
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	unsigned int new_val = set ? bits : 0;

	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
			   bits, new_val);
}
33 
/* Set or clear @bits in the 32-bit global switch register @addr. */
static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	u32 new_val = set ? bits : 0;

	regmap_update_bits(ksz_regmap_32(dev), addr, bits, new_val);
}
38 
/* Set or clear @bits in the 32-bit per-port register @offset of @port. */
static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	u32 new_val = set ? bits : 0;

	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
			   bits, new_val);
}
45 
ksz9477_change_mtu(struct ksz_device * dev,int port,int mtu)46 int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
47 {
48 	u16 frame_size;
49 
50 	if (!dsa_is_cpu_port(dev->ds, port))
51 		return 0;
52 
53 	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
54 
55 	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
56 				  REG_SW_MTU_MASK, frame_size);
57 }
58 
ksz9477_wait_vlan_ctrl_ready(struct ksz_device * dev)59 static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
60 {
61 	unsigned int val;
62 
63 	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
64 					val, !(val & VLAN_START), 10, 1000);
65 }
66 
/* ksz9477_get_vlan_table - read one VLAN table entry into vlan_table[0..2]
 * @dev: switch device
 * @vid: VLAN id selecting the entry
 * @vlan_table: output; [0] entry word, [1] untag map, [2] port map
 *
 * Serialized by dev->vlan_mutex.  Returns 0 on success or a negative
 * error if the VLAN engine does not finish the read in time.
 */
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	/* Select the entry, then kick off a hardware table read. */
	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	/* Release the VLAN access engine. */
	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}
95 
/* ksz9477_set_vlan_table - write one VLAN table entry from vlan_table[0..2]
 * @dev: switch device
 * @vid: VLAN id selecting the entry
 * @vlan_table: input; [0] entry word, [1] untag map, [2] port map
 *
 * On success the software VLAN cache for @vid is refreshed as well.
 * Serialized by dev->vlan_mutex.  Returns 0 or a negative error if the
 * VLAN engine does not finish the write in time.
 */
static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	/* Stage the entry data before triggering the write. */
	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	/* Release the VLAN access engine. */
	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}
129 
/* Fetch the four ALU data words (A..D) into @table[0..3]. */
static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	static const u32 alu_regs[] = {
		REG_SW_ALU_VAL_A,
		REG_SW_ALU_VAL_B,
		REG_SW_ALU_VAL_C,
		REG_SW_ALU_VAL_D,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(alu_regs); i++)
		ksz_read32(dev, alu_regs[i], &table[i]);
}
137 
/* Store @table[0..3] into the four ALU data words (A..D). */
static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	static const u32 alu_regs[] = {
		REG_SW_ALU_VAL_A,
		REG_SW_ALU_VAL_B,
		REG_SW_ALU_VAL_C,
		REG_SW_ALU_VAL_D,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(alu_regs); i++)
		ksz_write32(dev, alu_regs[i], table[i]);
}
145 
ksz9477_wait_alu_ready(struct ksz_device * dev)146 static int ksz9477_wait_alu_ready(struct ksz_device *dev)
147 {
148 	unsigned int val;
149 
150 	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
151 					val, !(val & ALU_START), 10, 1000);
152 }
153 
ksz9477_wait_alu_sta_ready(struct ksz_device * dev)154 static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
155 {
156 	unsigned int val;
157 
158 	return regmap_read_poll_timeout(ksz_regmap_32(dev),
159 					REG_SW_ALU_STAT_CTRL__4,
160 					val, !(val & ALU_STAT_START),
161 					10, 1000);
162 }
163 
/* Select an indirect SGMII register: MMD device id in the upper half of
 * the address word, register offset in the lower half.
 */
static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg)
{
	u32 addr_cmd;

	addr_cmd = ((u32)(devid & MII_MMD_CTRL_DEVAD_MASK) << 16) | reg;
	ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, addr_cmd);
}
172 
/* Read an indirect SGMII register: select the address, then read the
 * 16-bit payload at offset +2 of the 32-bit data window.
 */
static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg,
			 u16 *buf)
{
	port_sgmii_s(dev, port, devid, reg);
	ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf);
}
179 
/* Write an indirect SGMII register: select the address, then write the
 * value through the 32-bit data window.
 */
static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg,
			 u16 buf)
{
	port_sgmii_s(dev, port, devid, reg);
	ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf);
}
186 
/* ksz9477_pcs_read - MDIO C45 read callback for the SGMII port PCS
 * @bus: PCS MDIO bus (bus->priv is the ksz_device)
 * @phy: PHY address (unused; the bus serves only the SGMII port)
 * @mmd: MMD device id
 * @reg: register offset within the MMD
 *
 * Reads through the port's indirect SGMII window and patches some values
 * so the XPCS driver can drive this PCS: the PMA/PMD device id is faked,
 * and on an auto-negotiation status read the resolved speed/duplex is
 * mirrored into MII_BMCR.
 */
static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg)
{
	struct ksz_device *dev = bus->priv;
	int port = ksz_get_sgmii_port(dev);
	u16 val;

	port_sgmii_r(dev, port, mmd, reg, &val);

	/* Simulate a value to activate special code in the XPCS driver if
	 * supported.
	 */
	if (mmd == MDIO_MMD_PMAPMD) {
		if (reg == MDIO_DEVID1)
			val = 0x9477;
		else if (reg == MDIO_DEVID2)
			val = 0x22 << 10;
	} else if (mmd == MDIO_MMD_VEND2) {
		struct ksz_port *p = &dev->ports[port];

		/* Need to update MII_BMCR register with the exact speed and
		 * duplex mode when running in SGMII mode and this register is
		 * used to detect connected speed in that mode.
		 */
		if (reg == MMD_SR_MII_AUTO_NEG_STATUS) {
			int duplex, speed;

			if (val & SR_MII_STAT_LINK_UP) {
				/* Decode the resolved speed field. */
				speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M;
				if (speed == SR_MII_STAT_1000_MBPS)
					speed = SPEED_1000;
				else if (speed == SR_MII_STAT_100_MBPS)
					speed = SPEED_100;
				else
					speed = SPEED_10;

				if (val & SR_MII_STAT_FULL_DUPLEX)
					duplex = DUPLEX_FULL;
				else
					duplex = DUPLEX_HALF;

				/* Rewrite MII_BMCR only when the cached link
				 * state changed, avoiding redundant writes.
				 */
				if (!p->phydev.link ||
				    p->phydev.speed != speed ||
				    p->phydev.duplex != duplex) {
					u16 ctrl;

					p->phydev.link = 1;
					p->phydev.speed = speed;
					p->phydev.duplex = duplex;
					port_sgmii_r(dev, port, mmd, MII_BMCR,
						     &ctrl);
					/* Keep only the AN enable bit and
					 * encode the fixed speed/duplex.
					 */
					ctrl &= BMCR_ANENABLE;
					ctrl |= mii_bmcr_encode_fixed(speed,
								      duplex);
					port_sgmii_w(dev, port, mmd, MII_BMCR,
						     ctrl);
				}
			} else {
				p->phydev.link = 0;
			}
		} else if (reg == MII_BMSR) {
			/* Cache link state for the status path above. */
			p->phydev.link = (val & BMSR_LSTATUS);
		}
	}

	return val;
}
253 
/* ksz9477_pcs_write - MDIO C45 write callback for the SGMII port PCS
 * @bus: PCS MDIO bus (bus->priv is the ksz_device)
 * @phy: PHY address (unused; the bus serves only the SGMII port)
 * @mmd: MMD device id
 * @reg: register offset within the MMD
 * @val: value to write
 *
 * Applies fixups to VEND2 registers (1000BASE-X link bits, interrupt
 * masking, one-shot advertisement write) before forwarding the write
 * through the indirect SGMII window.  Always returns 0.
 */
static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg,
			     u16 val)
{
	struct ksz_device *dev = bus->priv;
	int port = ksz_get_sgmii_port(dev);

	if (mmd == MDIO_MMD_VEND2) {
		struct ksz_port *p = &dev->ports[port];

		if (reg == MMD_SR_MII_AUTO_NEG_CTRL) {
			u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S;

			/* Need these bits for 1000BASE-X mode to work with
			 * AN on.
			 */
			if (!(val & sgmii_mode))
				val |= SR_MII_SGMII_LINK_UP |
				       SR_MII_TX_CFG_PHY_MASTER;

			/* SGMII interrupt in the port cannot be masked, so
			 * make sure interrupt is not enabled as it is not
			 * handled.
			 */
			val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR;
		} else if (reg == MII_BMCR) {
			/* The MII_ADVERTISE register needs to write once
			 * before doing auto-negotiation for the correct
			 * config_word to be sent out after reset.
			 */
			if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) {
				u16 adv;

				/* The SGMII port cannot disable flow control
				 * so it is better to just advertise symmetric
				 * pause.
				 */
				port_sgmii_r(dev, port, mmd, MII_ADVERTISE,
					     &adv);
				adv |= ADVERTISE_1000XPAUSE;
				adv &= ~ADVERTISE_1000XPSE_ASYM;
				port_sgmii_w(dev, port, mmd, MII_ADVERTISE,
					     adv);
				p->sgmii_adv_write = 1;
			} else if (val & BMCR_RESET) {
				/* Reset clears the one-shot flag so the
				 * advertisement is rewritten next time.
				 */
				p->sgmii_adv_write = 0;
			}
		} else if (reg == MII_ADVERTISE) {
			/* XPCS driver writes to this register so there is no
			 * need to update it for the errata.
			 */
			p->sgmii_adv_write = 1;
		}
	}
	port_sgmii_w(dev, port, mmd, reg, val);

	return 0;
}
311 
ksz9477_pcs_create(struct ksz_device * dev)312 int ksz9477_pcs_create(struct ksz_device *dev)
313 {
314 	/* This chip has a SGMII port. */
315 	if (ksz_has_sgmii_port(dev)) {
316 		int port = ksz_get_sgmii_port(dev);
317 		struct ksz_port *p = &dev->ports[port];
318 		struct phylink_pcs *pcs;
319 		struct mii_bus *bus;
320 		int ret;
321 
322 		bus = devm_mdiobus_alloc(dev->dev);
323 		if (!bus)
324 			return -ENOMEM;
325 
326 		bus->name = "ksz_pcs_mdio_bus";
327 		snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
328 			 dev_name(dev->dev));
329 		bus->read_c45 = &ksz9477_pcs_read;
330 		bus->write_c45 = &ksz9477_pcs_write;
331 		bus->parent = dev->dev;
332 		bus->phy_mask = ~0;
333 		bus->priv = dev;
334 
335 		ret = devm_mdiobus_register(dev->dev, bus);
336 		if (ret)
337 			return ret;
338 
339 		pcs = xpcs_create_pcs_mdiodev(bus, 0);
340 		if (IS_ERR(pcs))
341 			return PTR_ERR(pcs);
342 		p->pcs = pcs;
343 	}
344 
345 	return 0;
346 }
347 
/**
 * ksz9477_reset_switch - software-reset the switch and apply base config
 * @dev: switch device
 *
 * Issues a global software reset and restores the always-required
 * defaults: SPI edge detection off, learning/aging enabled, interrupts
 * masked, and (on chips that support it) the reference clock output.
 *
 * Return: 0.
 */
int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_write8(dev, REG_SW_LUE_CTRL_1,
		   SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	/* Read (and discard) the port interrupt status. */
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID ||
	    dev->chip_id == KSZ9563_CHIP_ID)
		return 0;

	/* Reference clock output: disabled, enabled, or enabled at 125 MHz,
	 * as configured via device properties.
	 */
	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}
384 
/**
 * ksz9477_r_mib_cnt - read one MIB counter of a port and accumulate it
 * @dev: switch device
 * @port: port index
 * @addr: MIB counter index
 * @cnt: accumulator the hardware count is added to
 *
 * Triggers a counter read while preserving the port's freeze state.  The
 * hardware counter clears on read, so the value is added to @cnt.  On a
 * poll timeout @cnt is left untouched.
 */
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	/* Wait for the hardware to clear MIB_COUNTER_READ. */
	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB. get out of loop */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}
411 
/* Read a packet MIB counter, translating the generic counter slot into
 * the chip-specific index.  This family has no separate dropped counter,
 * so @dropped is left untouched.
 */
void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	u16 index = dev->info->mib_names[addr].index;

	ksz9477_r_mib_cnt(dev, port, index, cnt);
}
418 
/* Enable or disable the flush/freeze function of a port's MIB counters
 * and remember the state for the counter read path.
 */
void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	struct ksz_port *p = &dev->ports[port];

	mutex_lock(&p->mib.cnt_mutex);

	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     freeze ? MIB_COUNTER_FLUSH_FREEZE : 0);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;

	mutex_unlock(&p->mib.cnt_mutex);
}
432 
/* ksz9477_half_duplex_monitor - check errata DS80000754 fault conditions
 * @dev: switch device
 * @port: half-duplex port being checked
 * @tx_late_col: TX late collision count observed on @port
 *
 * Logs warnings when the fault conditions described by errata
 * DS80000754 are present; the switch may stop transmitting in these
 * states.  Returns 0 or a negative error from register access.
 */
static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
				       u64 tx_late_col)
{
	u8 lue_ctrl;
	u32 pmavbc;
	u16 pqm;
	int ret;

	/* Errata DS80000754 recommends monitoring potential faults in
	 * half-duplex mode. The switch might not be able to communicate anymore
	 * in these states. If you see this message, please read the
	 * errata-sheet for more information:
	 * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
	 * To workaround this issue, half-duplex mode should be avoided.
	 * A software reset could be implemented to recover from this state.
	 */
	dev_warn_once(dev->dev,
		      "Half-duplex detected on port %d, transmission halt may occur\n",
		      port);
	if (tx_late_col != 0) {
		/* Transmission halt with late collisions */
		dev_crit_once(dev->dev,
			      "TX late collisions detected, transmission may be halted on port %d\n",
			      port);
	}
	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
	if (ret)
		return ret;
	if (lue_ctrl & SW_VLAN_ENABLE) {
		/* With VLAN enabled, also watch queue and buffer levels. */
		ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
		if (ret)
			return ret;

		ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
		if (ret)
			return ret;

		if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
		    (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
			/* Transmission halt with Half-Duplex and VLAN */
			dev_crit_once(dev->dev,
				      "resources out of limits, transmission may be halted\n");
		}
	}

	return ret;
}
480 
/**
 * ksz9477_errata_monitor - run errata DS80000754 checks when relevant
 * @dev: switch device
 * @port: port to check
 * @tx_late_col: TX late collision count for @port
 *
 * Reads the port status and, when the link is up (speed resolved) and
 * running half-duplex, delegates to the half-duplex monitor.
 *
 * Return: 0 or a negative error from register access.
 */
int ksz9477_errata_monitor(struct ksz_device *dev, int port,
			   u64 tx_late_col)
{
	u8 status;
	int ret;

	ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
	if (ret)
		return ret;

	/* Only links with a resolved speed that are not full duplex are
	 * affected by the errata.
	 */
	if (FIELD_GET(PORT_INTF_SPEED_MASK, status) != PORT_INTF_SPEED_NONE &&
	    !(status & PORT_INTF_FULL_DUPLEX))
		ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);

	return ret;
}
499 
/**
 * ksz9477_port_init_cnt - flush (zero) all MIB counters of a port
 * @dev: switch device
 * @port: port index
 *
 * Freezes the port's counters, triggers the global flush for frozen
 * ports, then unfreezes.  Serialized against concurrent MIB reads by
 * the per-port counter mutex.
 */
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}
512 
/* Apply chip-specific fixups to a raw internal-PHY register value. */
static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	if (dev->chip_id != KSZ8563_CHIP_ID)
		return;

	/* KSZ8563R has no extended registers, yet sets BMSR_ESTATEN and
	 * BMSR_ERCAP; hide those capability bits.
	 */
	if (reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}
522 
/**
 * ksz9477_r_phy - read an internal (or simulated) PHY register
 * @dev: switch device
 * @addr: port index addressing the PHY
 * @reg: MII register number
 * @data: output value
 *
 * Ports without an internal PHY get canned register values emulating an
 * always-up gigabit PHY so initialization works; real PHYs are read
 * through the port register window with chip quirks applied.
 *
 * Return: 0 or a negative error from the register read.
 */
int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be setup in the device tree, but this function is
	 * still called for that port during initialization.
	 * For RGMII PHY there is no way to access it so the fixed PHY should
	 * be used.  For SGMII PHY the supporting code will be added later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			/* AN enabled, full duplex, 1000 Mb/s selected */
			val = 0x1140;
			break;
		case MII_BMSR:
			/* canned status: link up, AN complete, capabilities */
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			/* advertise 10/100 plus pause */
			val = 0x05e1;
			break;
		case MII_LPA:
			/* partner mirrors the advertisement */
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			/* advertise gigabit */
			val = 0x0700;
			break;
		case MII_STAT1000:
			/* report the partner's gigabit ability only when
			 * the cached resolved speed is 1000 Mb/s
			 */
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		/* Internal PHY registers start at 0x100, two bytes apart. */
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}
578 
/**
 * ksz9477_w_phy - write an internal PHY register
 * @dev: switch device
 * @addr: port index addressing the PHY
 * @reg: MII register number
 * @val: value to write
 *
 * Writes are silently ignored for ports without an internal PHY.
 * Registers 0x10 and above are written as 32-bit accesses to work
 * around a chip errata for SPI/I2C/in-band access.
 *
 * Return: 0 or a negative error from the register write.
 */
int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	u32 mask, val32;

	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	/* Internal PHY registers start at 0x100, two bytes apart. */
	if (reg < 0x10)
		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);

	/* Errata: When using SPI, I2C, or in-band register access,
	 * writes to certain PHY registers should be performed as
	 * 32-bit writes instead of 16-bit writes.
	 */
	val32 = val;
	mask = 0xffff;
	/* Position the 16-bit value in the half of the aligned 32-bit
	 * word that corresponds to the even/odd register number.
	 */
	if ((reg & 1) == 0) {
		val32 <<= 16;
		mask <<= 16;
	}
	reg &= ~1;
	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}
603 
/* ksz9477_cfg_port_member - program a port's VLAN membership bitmap
 * @dev: switch device
 * @port: port to configure
 * @member: bitmap of ports this port may forward to
 */
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}
608 
/**
 * ksz9477_flush_dyn_mac_table - drop learned (dynamic) MAC addresses
 * @dev: switch device
 * @port: port to flush, or a value >= the port count to flush everything
 *
 * For a single port, learning is temporarily disabled (if it was
 * enabled) so the flush cannot race with re-learning, then restored.
 */
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	/* Restrict the flush operation to dynamic MAC entries. */
	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		/* NOTE(review): this path triggers SW_FLUSH_STP_TABLE rather
		 * than SW_FLUSH_DYN_MAC_TABLE - confirm against the datasheet
		 * that this is the intended flush-all trigger.
		 */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}
631 
/**
 * ksz9477_port_vlan_filtering - toggle VLAN filtering for a port
 * @dev: switch device
 * @port: port to configure
 * @flag: true to enable VLAN filtering, false to disable
 * @extack: netlink extended ack (unused)
 *
 * Return: 0.
 */
int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		/* Enable the per-port VID lookup before turning on global
		 * VLAN processing.
		 */
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
		return 0;
	}

	/* Disable in the reverse order. */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
		     PORT_VLAN_LOOKUP_VID_0, false);

	return 0;
}
647 
/**
 * ksz9477_port_vlan_add - add @port (and the CPU port) to a VLAN
 * @dev: switch device
 * @port: port joining the VLAN
 * @vlan: switchdev VLAN descriptor (vid and flags)
 * @extack: netlink extended ack for error reporting
 *
 * Return: 0 on success or a negative error from the VLAN table access.
 */
int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	/* Mark the entry valid, using the vid as FID. */
	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	/* Update this port's bit in the untag map. */
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	/* The CPU port always stays in the tagged set. */
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	/* Both the member port and the CPU port join the VLAN. */
	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}
683 
/**
 * ksz9477_port_vlan_del - remove @port from a VLAN
 * @dev: switch device
 * @port: port leaving the VLAN
 * @vlan: switchdev VLAN descriptor (vid and flags)
 *
 * If the deleted VLAN was the port's PVID, the PVID falls back to the
 * default VLAN 1.
 *
 * Return: 0 on success or -ETIMEDOUT on VLAN table access failure.
 */
int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	/* Only the VID field of the default-VID register is relevant. */
	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	/* Drop the port from the member map. */
	vlan_table[2] &= ~BIT(port);

	/* Deleting the PVID VLAN resets the PVID to the default VLAN 1. */
	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}
716 
/**
 * ksz9477_fdb_add - install a static MAC table entry for a port
 * @dev: switch device
 * @port: port the address is reachable on
 * @addr: MAC address
 * @vid: VLAN id (non-zero enables FID matching)
 * @db: DSA database cookie (not used by this hardware operation)
 *
 * Looks up the {MAC, vid} entry through the ALU index registers, adds
 * @port to its forwarding map and writes it back as a valid static
 * entry.  Serialized by dev->alu_mutex.
 *
 * Return: 0 on success or a negative error on ALU timeout.
 */
int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	/* Keep existing forwarding ports and add this one. */
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	/* FID plus the two high MAC bytes in word 2, low four in word 3. */
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}
772 
/**
 * ksz9477_fdb_del - remove a port from a static MAC table entry
 * @dev: switch device
 * @port: port to remove from the entry's forwarding map
 * @addr: MAC address
 * @vid: VLAN id
 * @db: DSA database cookie (not used by this hardware operation)
 *
 * Looks up the {MAC, vid} entry, clears @port from its forwarding map
 * and deletes the whole entry when no forwarding port remains (or when
 * the entry was not a valid static one).  Serialized by dev->alu_mutex.
 *
 * Return: 0 on success or a negative error on ALU timeout.
 */
int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[1] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		/* Not a valid static entry; write back an empty one. */
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}
838 
/* Unpack the four raw ALU data words into an alu_struct. */
static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	u32 w0 = alu_table[0];
	u32 w1 = alu_table[1];
	u32 w2 = alu_table[2];
	u32 w3 = alu_table[3];

	/* word 0: validity, filters, aging priority, MSTP instance */
	alu->is_static = !!(w0 & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(w0 & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(w0 & ALU_V_DST_FILTER);
	alu->prio_age = (w0 >> ALU_V_PRIO_AGE_CNT_S) & ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = w0 & ALU_V_MSTP_M;

	/* word 1: override, FID use, forwarding port map */
	alu->is_override = !!(w1 & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(w1 & ALU_V_USE_FID);
	alu->port_forward = w1 & ALU_V_PORT_MAP;

	/* word 2: FID plus two high MAC bytes; word 3: low four MAC bytes */
	alu->fid = (w2 >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (w2 >> 8) & 0xFF;
	alu->mac[1] = w2 & 0xFF;
	alu->mac[2] = (w3 >> 24) & 0xFF;
	alu->mac[3] = (w3 >> 16) & 0xFF;
	alu->mac[4] = (w3 >> 8) & 0xFF;
	alu->mac[5] = w3 & 0xFF;
}
861 
/**
 * ksz9477_fdb_dump - walk the ALU and report entries for a port
 * @dev: switch device
 * @port: port whose entries are reported
 * @cb: DSA dump callback invoked for each matching entry
 * @data: opaque cookie passed to @cb
 *
 * Starts an ALU search and iterates all valid entries, invoking @cb for
 * every entry whose forwarding map includes @port.  The search is
 * stopped on every exit path.
 *
 * Return: 0 on success, -ETIMEDOUT if the ALU search stalls, or the
 * first non-zero value returned by @cb.
 */
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	struct alu_struct alu;
	u32 alu_table[4];
	u32 ksz_data;
	int timeout;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		/* Wait for either a valid entry or the search to finish.
		 *
		 * Bug fix: the old poll used a post-decrement in the loop
		 * condition, so @timeout ended at -1 on exhaustion and the
		 * "!timeout" check could never report -ETIMEDOUT (and
		 * fired spuriously if the poll succeeded on its final
		 * attempt).  With a pre-decrement, @timeout is 0 if and
		 * only if the poll really timed out.
		 */
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (--timeout > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		/* Search still running but no entry latched yet. */
		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		/* Report only entries that forward to the requested port. */
		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}
915 
/**
 * ksz9477_mdb_add - add a port to a static multicast table entry
 * @dev: switch device
 * @port: port joining the group
 * @mdb: switchdev multicast descriptor (address and vid)
 * @db: DSA database cookie (not used by this hardware operation)
 *
 * Scans the static ALU for a matching {MAC, vid} entry or the first
 * free slot, then writes the entry back with @port added to its
 * forwarding map.  Serialized by dev->alu_mutex.
 *
 * Return: 0 on success, -ENOSPC when the static table is full, or a
 * negative error on ALU timeout during the scan.
 */
int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	/* Split the MAC: two high bytes, then the remaining four. */
	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	/* Keep existing member ports and add this one. */
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	/* Trigger the write of slot @index. */
	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	/* NOTE(review): a timeout here is only logged; err stays 0 and the
	 * caller sees success - confirm this is intended.
	 */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);
	return err;
}
994 
ksz9477_mdb_del(struct ksz_device * dev,int port,const struct switchdev_obj_port_mdb * mdb,struct dsa_db db)995 int ksz9477_mdb_del(struct ksz_device *dev, int port,
996 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
997 {
998 	u32 static_table[4];
999 	const u8 *shifts;
1000 	const u32 *masks;
1001 	u32 data;
1002 	int index;
1003 	int ret = 0;
1004 	u32 mac_hi, mac_lo;
1005 
1006 	shifts = dev->info->shifts;
1007 	masks = dev->info->masks;
1008 
1009 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
1010 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
1011 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
1012 
1013 	mutex_lock(&dev->alu_mutex);
1014 
1015 	for (index = 0; index < dev->info->num_statics; index++) {
1016 		/* find empty slot first */
1017 		data = (index << shifts[ALU_STAT_INDEX]) |
1018 			masks[ALU_STAT_READ] | ALU_STAT_START;
1019 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
1020 
1021 		/* wait to be finished */
1022 		ret = ksz9477_wait_alu_sta_ready(dev);
1023 		if (ret) {
1024 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
1025 			goto exit;
1026 		}
1027 
1028 		/* read ALU static table */
1029 		ksz9477_read_table(dev, static_table);
1030 
1031 		if (static_table[0] & ALU_V_STATIC_VALID) {
1032 			/* check this has same vid & mac address */
1033 
1034 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
1035 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
1036 			    static_table[3] == mac_lo) {
1037 				/* found matching one */
1038 				break;
1039 			}
1040 		}
1041 	}
1042 
1043 	/* no available entry */
1044 	if (index == dev->info->num_statics)
1045 		goto exit;
1046 
1047 	/* clear port */
1048 	static_table[1] &= ~BIT(port);
1049 
1050 	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
1051 		/* delete entry */
1052 		static_table[0] = 0;
1053 		static_table[1] = 0;
1054 		static_table[2] = 0;
1055 		static_table[3] = 0;
1056 	}
1057 
1058 	ksz9477_write_table(dev, static_table);
1059 
1060 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
1061 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
1062 
1063 	/* wait to be finished */
1064 	ret = ksz9477_wait_alu_sta_ready(dev);
1065 	if (ret)
1066 		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
1067 
1068 exit:
1069 	mutex_unlock(&dev->alu_mutex);
1070 
1071 	return ret;
1072 }
1073 
/**
 * ksz9477_port_mirror_add - Add a port mirroring (sniffing) rule
 * @dev: The switch device instance
 * @port: Port whose ingress or egress traffic is to be mirrored
 * @mirror: Mirror entry naming the destination (sniffer) port
 * @ingress: true to mirror received frames, false to mirror transmitted
 * @extack: Netlink extended ack for user-visible error reporting
 *
 * The hardware supports only a single sniffer port, so the request is
 * rejected with -EBUSY if any other port is already configured as sniffer.
 *
 * Return: 0 on success, -EBUSY if a different sniffer port is in use.
 */
int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	/* Initialized so a failed register read cannot leave us testing an
	 * uninitialized value below.
	 */
	u8 data = 0;
	int p;

	/* Limit to one sniffer port
	 * Check if any of the port is already set for sniffing
	 * If yes, instruct the user to remove the previous entry & exit
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}
1112 
ksz9477_port_mirror_del(struct ksz_device * dev,int port,struct dsa_mall_mirror_tc_entry * mirror)1113 void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
1114 			     struct dsa_mall_mirror_tc_entry *mirror)
1115 {
1116 	bool in_use = false;
1117 	u8 data;
1118 	int p;
1119 
1120 	if (mirror->ingress)
1121 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
1122 	else
1123 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
1124 
1125 
1126 	/* Check if any of the port is still referring to sniffer port */
1127 	for (p = 0; p < dev->info->port_cnt; p++) {
1128 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
1129 
1130 		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
1131 			in_use = true;
1132 			break;
1133 		}
1134 	}
1135 
1136 	/* delete sniffing if there are no other mirroring rules */
1137 	if (!in_use)
1138 		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
1139 			     PORT_MIRROR_SNIFFER, false);
1140 }
1141 
ksz9477_get_interface(struct ksz_device * dev,int port)1142 static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
1143 {
1144 	phy_interface_t interface;
1145 	bool gbit;
1146 
1147 	if (dev->info->internal_phy[port])
1148 		return PHY_INTERFACE_MODE_NA;
1149 
1150 	gbit = ksz_get_gbit(dev, port);
1151 
1152 	interface = ksz_get_xmii(dev, port, gbit);
1153 
1154 	return interface;
1155 }
1156 
ksz9477_get_caps(struct ksz_device * dev,int port,struct phylink_config * config)1157 void ksz9477_get_caps(struct ksz_device *dev, int port,
1158 		      struct phylink_config *config)
1159 {
1160 	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
1161 				   MAC_SYM_PAUSE;
1162 
1163 	if (dev->info->gbit_capable[port])
1164 		config->mac_capabilities |= MAC_1000FD;
1165 
1166 	if (ksz_is_sgmii_port(dev, port)) {
1167 		struct ksz_port *p = &dev->ports[port];
1168 
1169 		phy_interface_or(config->supported_interfaces,
1170 				 config->supported_interfaces,
1171 				 p->pcs->supported_interfaces);
1172 	}
1173 }
1174 
/**
 * ksz9477_set_ageing_time - Program the address-table ageing period
 * @dev: The switch device instance
 * @msecs: Requested ageing time in milliseconds
 *
 * The hardware ageing period is the product of a 3-bit multiplier
 * (REG_SW_LUE_CTRL_0, SW_AGE_CNT_M field) and an 8-bit seconds value
 * (REG_SW_LUE_CTRL_3); neither factor may be zero, so the usable range
 * is 1 to 7 * 255 = 1785 seconds.
 *
 * Return: 0 on success, -EINVAL if @msecs exceeds the hardware maximum,
 * or a negative error code on register access failure.
 */
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 data, mult, value;
	u32 max_val;
	int ret;

#define MAX_TIMER_VAL	((1 << 8) - 1)

	/* The aging timer comprises a 3-bit multiplier and an 8-bit second
	 * value.  Either of them cannot be zero.  The maximum timer is then
	 * 7 * 255 = 1785 seconds.
	 */
	if (!secs)
		secs = 1;

	/* Return error if too large. */
	else if (secs > 7 * MAX_TIMER_VAL)
		return -EINVAL;

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	/* Check whether there is need to update the multiplier. */
	mult = FIELD_GET(SW_AGE_CNT_M, value);
	max_val = MAX_TIMER_VAL;
	if (mult > 0) {
		/* Try to use the same multiplier already in the register as
		 * the hardware default uses multiplier 4 and 75 seconds for
		 * 300 seconds.
		 */
		max_val = DIV_ROUND_UP(secs, mult);
		if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
			max_val = MAX_TIMER_VAL;
	}

	/* data is the new multiplier; only rewrite CTRL_0 if it changed. */
	data = DIV_ROUND_UP(secs, max_val);
	if (mult != data) {
		value &= ~SW_AGE_CNT_M;
		value |= FIELD_PREP(SW_AGE_CNT_M, data);
		ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
		if (ret < 0)
			return ret;
	}

	/* Seconds factor = ceil(secs / multiplier). */
	value = DIV_ROUND_UP(secs, data);
	return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
1224 
ksz9477_port_queue_split(struct ksz_device * dev,int port)1225 void ksz9477_port_queue_split(struct ksz_device *dev, int port)
1226 {
1227 	u8 data;
1228 
1229 	if (dev->info->num_tx_queues == 8)
1230 		data = PORT_EIGHT_QUEUE;
1231 	else if (dev->info->num_tx_queues == 4)
1232 		data = PORT_FOUR_QUEUE;
1233 	else if (dev->info->num_tx_queues == 2)
1234 		data = PORT_TWO_QUEUE;
1235 	else
1236 		data = PORT_SINGLE_QUEUE;
1237 
1238 	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
1239 }
1240 
/**
 * ksz9477_port_setup - Apply the default hardware configuration to a port
 * @dev: The switch device instance
 * @port: Port index to configure
 * @cpu_port: true if this is the host (CPU) port
 *
 * Configures queue split, loopback, back pressure, broadcast storm
 * protection, flow control, port membership, ACL init and WoL defaults.
 * The CPU port additionally gets tail tagging enabled and is made a
 * member of all user ports; user ports are members of the upstream port
 * only.
 */
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	const u16 *regs = dev->info->regs;
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz9477_port_queue_split(dev, port);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* force flow control for non-PHY ports only */
	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
		     !dev->info->internal_phy[port]);

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts (reading the register acks them) */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);

	ksz9477_port_acl_init(dev, port);

	/* clear pending wake flags */
	ksz_handle_wake_reason(dev, port);

	/* Disable all WoL options by default. Otherwise
	 * ksz_switch_macaddr_get/put logic will not work properly.
	 */
	ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
1295 
/**
 * ksz9477_config_cpu_port - Locate and configure the host (CPU) port
 * @ds: DSA switch instance
 *
 * Finds the CPU port among the switch ports, resolves its PHY interface
 * mode (device tree value takes precedence over the hardware strapping
 * read from the XMII registers), runs the port setup for it, and then
 * disables STP forwarding on all remaining ports, powering down unused
 * internal PHYs.
 */
void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface.  If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			/* Warn when the chosen mode differs from strapping. */
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);

		/* Power down the internal PHY if port is unused. */
		if (dsa_is_unused_port(ds, i) && dev->info->internal_phy[i])
			ksz_pwrite16(dev, i, 0x100, BMCR_PDOWN);
	}
}
1357 
/**
 * ksz9477_enable_stp_addr - Forward BPDU frames to the CPU port
 * @dev: The switch device instance
 *
 * Enables the reserved multicast table and writes an entry with the
 * override bit set so BPDU packets are delivered to the CPU port even
 * when the receiving port is not in forwarding state.
 *
 * Return: 0 on success, negative error code on register access failure.
 */
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packet to CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}
1390 
/**
 * ksz9477_setup - Switch-wide one-time hardware initialization
 * @ds: DSA switch instance
 *
 * Applies global configuration: VLAN boundary for port partitioning,
 * length checking, jumbo frame support (needed so REG_SW_MTU__2 takes
 * effect), back pressure mode, default MTU, egress rate limiting, MIB
 * counter freeze, and clears PME (WoL) state.
 *
 * Return: 0 on success, negative error code on register access failure.
 */
int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	const u16 *regs = dev->info->regs;
	int ret = 0;

	ds->mtu_enforcement_ingress = true;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Do not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Use collision based back pressure mode. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
		SW_BACK_PRESSURE_COLLISION);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	/* Make sure PME (WoL) is not enabled. If requested, it will
	 * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
	 * do not like PME events changes before shutdown.
	 */
	return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
}
1431 
/* Translate a (port, offset) pair into the absolute register address in
 * the per-port control register space.
 */
u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}
1436 
/* Program the credit-based shaper increment for @port; the register takes
 * the value with the low 8 bits dropped.
 */
int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT,
			    val >> 8);
}
1443 
1444 /* The KSZ9477 provides following HW features to accelerate
1445  * HSR frames handling:
1446  *
1447  * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
1448  * 2. RX PACKET DUPLICATION DISCARDING
1449  * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
1450  *
1451  * Only one from point 1. has the NETIF_F* flag available.
1452  *
1453  * Ones from point 2 and 3 are "best effort" - i.e. those will
1454  * work correctly most of the time, but it may happen that some
1455  * frames will not be caught - to be more specific; there is a race
1456  * condition in hardware such that, when duplicate packets are received
1457  * on member ports very close in time to each other, the hardware fails
1458  * to detect that they are duplicates.
1459  *
1460  * Hence, the SW needs to handle those special cases. However, the speed
1461  * up gain is considerable when above features are used.
1462  *
1463  * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
1464  * can be forwarded in the switch fabric between HSR ports.
1465  */
1466 #define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
1467 
/**
 * ksz9477_hsr_join - Enable HSR offload on a port
 * @ds: DSA switch instance
 * @port: Port joining the HSR ring
 * @hsr: HSR virtual net device
 *
 * Adds @port to the HSR port map, bridges it with the other HSR member
 * ports (plus the upstream port), enables duplicate-frame discarding on
 * first join, turns on per-port self-address filtering, and advertises
 * the HW HSR features on the user netdev.
 */
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	struct ksz_device *dev = ds->priv;
	struct net_device *user;
	struct dsa_port *hsr_dp;
	u8 data, hsr_ports = 0;

	/* Program which port(s) shall support HSR */
	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));

	/* Forward frames between HSR ports (i.e. bridge together HSR ports) */
	if (dev->hsr_ports) {
		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
			hsr_ports |= BIT(hsr_dp->index);

		hsr_ports |= BIT(dsa_upstream_port(ds, port));
		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
			ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
	}

	if (!dev->hsr_ports) {
		/* Enable discarding of received HSR frames */
		ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
		data |= HSR_DUPLICATE_DISCARD;
		data &= ~HSR_NODE_UNICAST;
		ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
	}

	/* Enable per port self-address filtering.
	 * The global self-address filtering has already been enabled in the
	 * ksz9477_reset_switch() function.
	 */
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);

	/* Setup HW supported features for lan HSR ports */
	user = dsa_to_port(ds, port)->user;
	user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
}
1506 
/**
 * ksz9477_hsr_leave - Disable HSR offload on a port
 * @ds: DSA switch instance
 * @port: Port leaving the HSR ring
 * @hsr: HSR virtual net device (unused here)
 *
 * Reverses ksz9477_hsr_join() for @port: removes it from the HSR port
 * map, restores its membership to the upstream port only, and disables
 * per-port self-address filtering.
 */
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	struct ksz_device *dev = ds->priv;

	/* Clear port HSR support */
	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);

	/* Disable forwarding frames between HSR ports */
	ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));

	/* Disable per port self-address filtering */
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
}
1520 
ksz9477_switch_init(struct ksz_device * dev)1521 int ksz9477_switch_init(struct ksz_device *dev)
1522 {
1523 	u8 data8;
1524 	int ret;
1525 
1526 	dev->port_mask = (1 << dev->info->port_cnt) - 1;
1527 
1528 	/* turn off SPI DO Edge select */
1529 	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
1530 	if (ret)
1531 		return ret;
1532 
1533 	data8 &= ~SPI_AUTO_EDGE_DETECTION;
1534 	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
1535 	if (ret)
1536 		return ret;
1537 
1538 	return 0;
1539 }
1540 
/* Teardown hook: return the switch hardware to its reset-default state. */
void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}
1545 
1546 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
1547 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
1548 MODULE_LICENSE("GPL");
1549