xref: /linux/drivers/net/dsa/mxl862xx/mxl862xx.c (revision dc1d9408c961c1c4d4b3b99a1d9390c17e13de71)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for MaxLinear MxL862xx switch family
4  *
5  * Copyright (C) 2024 MaxLinear Inc.
6  * Copyright (C) 2025 John Crispin <john@phrozen.org>
7  * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/delay.h>
12 #include <linux/etherdevice.h>
13 #include <linux/if_bridge.h>
14 #include <linux/module.h>
15 #include <linux/of_device.h>
16 #include <linux/of_mdio.h>
17 #include <linux/phy.h>
18 #include <linux/phylink.h>
19 #include <net/dsa.h>
20 
21 #include "mxl862xx.h"
22 #include "mxl862xx-api.h"
23 #include "mxl862xx-cmd.h"
24 #include "mxl862xx-host.h"
25 
/* Wrappers around mxl862xx_api_wrap(); the two trailing booleans select
 * read-back of the firmware reply into @data and quiet operation (no
 * error logging - used while polling for firmware readiness in
 * mxl862xx_wait_ready()).
 */
#define MXL862XX_API_WRITE(dev, cmd, data) \
	mxl862xx_api_wrap(dev, cmd, &(data), sizeof((data)), false, false)
#define MXL862XX_API_READ(dev, cmd, data) \
	mxl862xx_api_wrap(dev, cmd, &(data), sizeof((data)), true, false)
#define MXL862XX_API_READ_QUIET(dev, cmd, data) \
	mxl862xx_api_wrap(dev, cmd, &(data), sizeof((data)), true, true)

/* Polling interval for RMON counter accumulation. At 2.5 Gbps with
 * minimum-size (64-byte) frames, a 32-bit packet counter wraps in ~880s.
 * 2s gives a comfortable margin.
 */
#define MXL862XX_STATS_POLL_INTERVAL	(2 * HZ)
38 
/* Describes one firmware RMON counter exposed as an ethtool statistic. */
struct mxl862xx_mib_desc {
	unsigned int size;	/* counter width, presumably in 32-bit words
				 * (byte counters use 2) -- TODO confirm
				 */
	unsigned int offset;	/* byte offset into struct mxl862xx_rmon_port_cnt */
	const char *name;	/* string reported via ethtool -S */
};

/* Build a descriptor from a field of struct mxl862xx_rmon_port_cnt. */
#define MIB_DESC(_size, _name, _element)					\
{									\
	.size = _size,							\
	.name = _name,							\
	.offset = offsetof(struct mxl862xx_rmon_port_cnt, _element)	\
}

/* Hardware-specific counters not covered by any standardized stats callback. */
static const struct mxl862xx_mib_desc mxl862xx_mib[] = {
	MIB_DESC(1, "TxAcmDroppedPkts", tx_acm_dropped_pkts),
	MIB_DESC(1, "RxFilteredPkts", rx_filtered_pkts),
	MIB_DESC(1, "RxExtendedVlanDiscardPkts", rx_extended_vlan_discard_pkts),
	MIB_DESC(1, "MtuExceedDiscardPkts", mtu_exceed_discard_pkts),
	MIB_DESC(2, "RxBadBytes", rx_bad_bytes),
};
60 
/* Frame-size histogram buckets for ethtool RMON statistics; the
 * zero-filled entry terminates the list.
 */
static const struct ethtool_rmon_hist_range mxl862xx_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 10240 },
	{}
};
70 
/* Per-port SDMA/FDMA port-control registers (6-register stride per
 * port). The EN bits are toggled by mxl862xx_port_state() to enable or
 * disable a port's traffic paths.
 */
#define MXL862XX_SDMA_PCTRLP(p)		(0xbc0 + ((p) * 0x6))
#define MXL862XX_SDMA_PCTRL_EN		BIT(0)

#define MXL862XX_FDMA_PCTRLP(p)		(0xa80 + ((p) * 0x6))
#define MXL862XX_FDMA_PCTRL_EN		BIT(0)

/* Firmware readiness polling budget used by mxl862xx_wait_ready(). */
#define MXL862XX_READY_TIMEOUT_MS	10000
#define MXL862XX_READY_POLL_MS		100

/* TCM (meter) registers: instance select plus committed/excess burst
 * size, written indirectly via REGISTERMOD in mxl862xx_setup_drop_meter().
 */
#define MXL862XX_TCM_INST_SEL		0xe00
#define MXL862XX_TCM_CBS		0xe12
#define MXL862XX_TCM_EBS		0xe13
83 
/* Egress sub-meter slots used to block flooding of specific traffic
 * classes; mxl862xx_set_bridge_port() points enabled slots at the
 * shared drop meter set up by mxl862xx_setup_drop_meter().
 */
static const int mxl862xx_flood_meters[] = {
	MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_UC,
	MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_IP,
	MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_NON_IP,
	MXL862XX_BRIDGE_PORT_EGRESS_METER_BROADCAST,
};

/* Treatment applied by an Extended VLAN rule when its filter matches. */
enum mxl862xx_evlan_action {
	EVLAN_ACCEPT,			/* pass-through, no tag removal */
	EVLAN_STRIP_IF_UNTAGGED,	/* remove 1 tag if entry's untagged flag set */
	EVLAN_PVID_OR_DISCARD,		/* insert PVID tag or discard if no PVID */
	EVLAN_STRIP1_AND_PVID_OR_DISCARD,/* strip 1 tag + insert PVID, or discard */
};
97 
/* Template for one Extended VLAN rule. The concrete VID, untagged flag
 * and PVID are supplied at programming time by mxl862xx_evlan_write_rule().
 */
struct mxl862xx_evlan_rule_desc {
	u8 outer_type;		/* enum mxl862xx_extended_vlan_filter_type */
	u8 inner_type;		/* enum mxl862xx_extended_vlan_filter_type */
	u8 outer_tpid;		/* enum mxl862xx_extended_vlan_filter_tpid */
	u8 inner_tpid;		/* enum mxl862xx_extended_vlan_filter_tpid */
	bool match_vid;		/* true: match on VID from the vid parameter */
	u8 action;		/* enum mxl862xx_evlan_action */
};
106 
107 /* Shorthand constants for readability */
108 #define FT_NORMAL	MXL862XX_EXTENDEDVLAN_FILTER_TYPE_NORMAL
109 #define FT_NO_FILTER	MXL862XX_EXTENDEDVLAN_FILTER_TYPE_NO_FILTER
110 #define FT_DEFAULT	MXL862XX_EXTENDEDVLAN_FILTER_TYPE_DEFAULT
111 #define FT_NO_TAG	MXL862XX_EXTENDEDVLAN_FILTER_TYPE_NO_TAG
112 #define TP_NONE		MXL862XX_EXTENDEDVLAN_FILTER_TPID_NO_FILTER
113 #define TP_8021Q	MXL862XX_EXTENDEDVLAN_FILTER_TPID_8021Q
114 
115 /*
116  * VLAN-aware ingress: 7 final catchall rules.
117  *
118  * VLAN Filter handles VID membership for tagged frames, so the
119  * Extended VLAN ingress block only needs to handle:
120  * - Priority-tagged (VID=0): strip + insert PVID
121  * - Untagged: insert PVID or discard
122  * - Standard 802.1Q VID>0: pass through (VF handles membership)
123  * - Non-8021Q TPID (0x88A8 etc.): treat as untagged
124  *
125  * Rule ordering is critical: the EVLAN engine scans entries in
126  * ascending index order and stops at the first match.
127  *
128  * The 802.1Q ACCEPT rules (indices 3--4) must appear BEFORE the
129  * NO_FILTER catchalls (indices 5--6). NO_FILTER matches any tag
130  * regardless of TPID, so without the ACCEPT guard, it would also
131  * catch standard 802.1Q VID>0 frames and corrupt them. With the
132  * guard, 802.1Q VID>0 frames match the ACCEPT rules first and
133  * pass through untouched; only non-8021Q TPID frames pass through
134  * to the NO_FILTER catchalls.
135  */
136 static const struct mxl862xx_evlan_rule_desc ingress_aware_final[] = {
137 	/* 802.1p / priority-tagged (VID 0): strip + PVID */
138 	{ FT_NORMAL,    FT_NORMAL, TP_8021Q, TP_8021Q, true,  EVLAN_STRIP1_AND_PVID_OR_DISCARD },
139 	{ FT_NORMAL,    FT_NO_TAG, TP_8021Q, TP_NONE,  true,  EVLAN_STRIP1_AND_PVID_OR_DISCARD },
140 	/* Untagged: PVID insertion or discard */
141 	{ FT_NO_TAG,    FT_NO_TAG, TP_NONE,  TP_NONE,  false, EVLAN_PVID_OR_DISCARD },
142 	/* 802.1Q VID>0: accept - VF handles membership.
143 	 * match_vid=false means any VID; VID=0 is already caught above.
144 	 */
145 	{ FT_NORMAL,    FT_NORMAL, TP_8021Q, TP_8021Q, false, EVLAN_ACCEPT },
146 	{ FT_NORMAL,    FT_NO_TAG, TP_8021Q, TP_NONE,  false, EVLAN_ACCEPT },
147 	/* Non-8021Q TPID (0x88A8 etc.): treat as untagged - strip + PVID */
148 	{ FT_NO_FILTER, FT_NO_FILTER, TP_NONE, TP_NONE, false, EVLAN_STRIP1_AND_PVID_OR_DISCARD },
149 	{ FT_NO_FILTER, FT_NO_TAG,    TP_NONE, TP_NONE, false, EVLAN_STRIP1_AND_PVID_OR_DISCARD },
150 };
151 
152 /*
153  * VID-specific accept rules (VLAN-aware, standard tag, 2 per VID).
154  * Outer tag carries the VLAN; inner may or may not be present.
155  */
156 static const struct mxl862xx_evlan_rule_desc vid_accept_standard[] = {
157 	{ FT_NORMAL, FT_NORMAL, TP_8021Q, TP_8021Q, true, EVLAN_STRIP_IF_UNTAGGED },
158 	{ FT_NORMAL, FT_NO_TAG, TP_8021Q, TP_NONE,  true, EVLAN_STRIP_IF_UNTAGGED },
159 };
160 
161 /*
162  * Egress tag-stripping rules for VLAN-unaware mode (2 per untagged VID).
163  * The HW sees the MxL tag as outer; the real VLAN tag, if any, is inner.
164  */
165 static const struct mxl862xx_evlan_rule_desc vid_accept_egress_unaware[] = {
166 	{ FT_NO_FILTER, FT_NORMAL, TP_NONE, TP_8021Q, true,  EVLAN_STRIP_IF_UNTAGGED },
167 	{ FT_NO_FILTER, FT_NO_TAG, TP_NONE, TP_NONE,  false, EVLAN_STRIP_IF_UNTAGGED },
168 };
169 
/* DSA callback: this switch family always uses the MxL862 tag format,
 * regardless of port or the tagger proposed by the core (@m).
 */
static enum dsa_tag_protocol mxl862xx_get_tag_protocol(struct dsa_switch *ds,
						       int port,
						       enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_MXL862;
}
176 
177 /* PHY access via firmware relay */
mxl862xx_phy_read_mmd(struct mxl862xx_priv * priv,int addr,int devadd,int regnum)178 static int mxl862xx_phy_read_mmd(struct mxl862xx_priv *priv, int addr,
179 				 int devadd, int regnum)
180 {
181 	struct mdio_relay_data param = {
182 		.phy = addr,
183 		.mmd = devadd,
184 		.reg = cpu_to_le16(regnum),
185 	};
186 	int ret;
187 
188 	ret = MXL862XX_API_READ(priv, INT_GPHY_READ, param);
189 	if (ret)
190 		return ret;
191 
192 	return le16_to_cpu(param.data);
193 }
194 
/* Write a PHY register through the firmware MDIO relay.
 * @devadd: MMD device address; 0 selects Clause 22 access.
 * Returns 0 or a negative error code.
 */
static int mxl862xx_phy_write_mmd(struct mxl862xx_priv *priv, int addr,
				  int devadd, int regnum, u16 data)
{
	struct mdio_relay_data req = {
		.phy = addr,
		.mmd = devadd,
		.reg = cpu_to_le16(regnum),
		.data = cpu_to_le16(data),
	};

	return MXL862XX_API_WRITE(priv, INT_GPHY_WRITE, req);
}
207 
mxl862xx_phy_read_mii_bus(struct mii_bus * bus,int addr,int regnum)208 static int mxl862xx_phy_read_mii_bus(struct mii_bus *bus, int addr, int regnum)
209 {
210 	return mxl862xx_phy_read_mmd(bus->priv, addr, 0, regnum);
211 }
212 
/* mii_bus Clause 22 write callback (relayed MMD 0). */
static int mxl862xx_phy_write_mii_bus(struct mii_bus *bus, int addr,
				      int regnum, u16 val)
{
	struct mxl862xx_priv *priv = bus->priv;

	return mxl862xx_phy_write_mmd(priv, addr, 0, regnum, val);
}
218 
mxl862xx_phy_read_c45_mii_bus(struct mii_bus * bus,int addr,int devadd,int regnum)219 static int mxl862xx_phy_read_c45_mii_bus(struct mii_bus *bus, int addr,
220 					 int devadd, int regnum)
221 {
222 	return mxl862xx_phy_read_mmd(bus->priv, addr, devadd, regnum);
223 }
224 
/* mii_bus Clause 45 write callback. */
static int mxl862xx_phy_write_c45_mii_bus(struct mii_bus *bus, int addr,
					  int devadd, int regnum, u16 val)
{
	struct mxl862xx_priv *priv = bus->priv;

	return mxl862xx_phy_write_mmd(priv, addr, devadd, regnum, val);
}
230 
/* Poll the firmware after reset until it answers API commands.
 * Returns 0 once ready, or -ETIMEDOUT after MXL862XX_READY_TIMEOUT_MS
 * (measured from entry, including the initial 2s sleep).
 */
static int mxl862xx_wait_ready(struct dsa_switch *ds)
{
	struct mxl862xx_sys_fw_image_version ver = {};
	unsigned long start = jiffies, timeout;
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_cfg cfg = {};
	int ret;

	timeout = start + msecs_to_jiffies(MXL862XX_READY_TIMEOUT_MS);
	msleep(2000); /* it always takes at least 2 seconds */
	do {
		/* A non-zero major version shows the mailbox responds. */
		ret = MXL862XX_API_READ_QUIET(priv, SYS_MISC_FW_VERSION, ver);
		if (ret || !ver.iv_major)
			goto not_ready_yet;

		/* being able to perform CFGGET indicates that
		 * the firmware is ready
		 */
		ret = MXL862XX_API_READ_QUIET(priv,
					      MXL862XX_COMMON_CFGGET,
					      cfg);
		if (ret)
			goto not_ready_yet;

		dev_info(ds->dev, "switch ready after %ums, firmware %u.%u.%u (build %u)\n",
			 jiffies_to_msecs(jiffies - start),
			 ver.iv_major, ver.iv_minor,
			 le16_to_cpu(ver.iv_revision),
			 le32_to_cpu(ver.iv_build_num));
		return 0;

not_ready_yet:
		msleep(MXL862XX_READY_POLL_MS);
	} while (time_before(jiffies, timeout));

	dev_err(ds->dev, "switch not responding after reset\n");
	return -ETIMEDOUT;
}
269 
mxl862xx_setup_mdio(struct dsa_switch * ds)270 static int mxl862xx_setup_mdio(struct dsa_switch *ds)
271 {
272 	struct mxl862xx_priv *priv = ds->priv;
273 	struct device *dev = ds->dev;
274 	struct device_node *mdio_np;
275 	struct mii_bus *bus;
276 	int ret;
277 
278 	bus = devm_mdiobus_alloc(dev);
279 	if (!bus)
280 		return -ENOMEM;
281 
282 	bus->priv = priv;
283 	bus->name = KBUILD_MODNAME "-mii";
284 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
285 	bus->read_c45 = mxl862xx_phy_read_c45_mii_bus;
286 	bus->write_c45 = mxl862xx_phy_write_c45_mii_bus;
287 	bus->read = mxl862xx_phy_read_mii_bus;
288 	bus->write = mxl862xx_phy_write_mii_bus;
289 	bus->parent = dev;
290 	bus->phy_mask = ~ds->phys_mii_mask;
291 
292 	mdio_np = of_get_child_by_name(dev->of_node, "mdio");
293 	if (!mdio_np)
294 		return -ENODEV;
295 
296 	ret = devm_of_mdiobus_register(dev, bus, mdio_np);
297 	of_node_put(mdio_np);
298 
299 	return ret;
300 }
301 
/* Set the unknown-unicast/multicast/broadcast forwarding mode of a
 * firmware bridge: flood when the corresponding flag is set, discard
 * otherwise. IP and non-IP multicast always share one setting.
 */
static int mxl862xx_bridge_config_fwd(struct dsa_switch *ds, u16 bridge_id,
				      bool ucast_flood, bool mcast_flood,
				      bool bcast_flood)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_bridge_config cfg = {
		.mask = cpu_to_le32(MXL862XX_BRIDGE_CONFIG_MASK_FORWARDING_MODE),
		.bridge_id = cpu_to_le16(bridge_id),
	};
	int err;

	cfg.forward_unknown_unicast = cpu_to_le32(!ucast_flood ?
		MXL862XX_BRIDGE_FORWARD_DISCARD : MXL862XX_BRIDGE_FORWARD_FLOOD);
	cfg.forward_unknown_multicast_ip = cpu_to_le32(!mcast_flood ?
		MXL862XX_BRIDGE_FORWARD_DISCARD : MXL862XX_BRIDGE_FORWARD_FLOOD);
	cfg.forward_unknown_multicast_non_ip = cfg.forward_unknown_multicast_ip;
	cfg.forward_broadcast = cpu_to_le32(!bcast_flood ?
		MXL862XX_BRIDGE_FORWARD_DISCARD : MXL862XX_BRIDGE_FORWARD_FLOOD);

	err = MXL862XX_API_WRITE(priv, MXL862XX_BRIDGE_CONFIGSET, cfg);
	if (err)
		dev_err(ds->dev, "failed to configure bridge %u forwarding: %d\n",
			bridge_id, err);

	return err;
}
331 
332 /* Allocate a single zero-rate meter shared by all ports and flood types.
333  * All flood-blocking egress sub-meters point to this one meter so that any
334  * packet hitting this meter is unconditionally dropped.
335  *
336  * The firmware API requires CBS >= 64 (its bs2ls encoder clamps smaller
337  * values), so the meter is initially configured with CBS=EBS=64.
338  * A zero-rate bucket starts full at CBS bytes, which would let one packet
339  * through before the bucket empties. To eliminate this one-packet leak we
340  * override CBS and EBS to zero via direct register writes after the API call;
341  * the hardware accepts CBS=0 and immediately flags the bucket as exceeded,
342  * so no traffic can ever pass.
343  */
mxl862xx_setup_drop_meter(struct dsa_switch * ds)344 static int mxl862xx_setup_drop_meter(struct dsa_switch *ds)
345 {
346 	struct mxl862xx_qos_meter_cfg meter = {};
347 	struct mxl862xx_priv *priv = ds->priv;
348 	struct mxl862xx_register_mod reg;
349 	int ret;
350 
351 	/* meter_id=0 means auto-alloc */
352 	ret = MXL862XX_API_READ(priv, MXL862XX_QOS_METERALLOC, meter);
353 	if (ret)
354 		return ret;
355 
356 	meter.enable = true;
357 	meter.cbs = cpu_to_le32(64);
358 	meter.ebs = cpu_to_le32(64);
359 	snprintf(meter.meter_name, sizeof(meter.meter_name), "drop");
360 
361 	ret = MXL862XX_API_WRITE(priv, MXL862XX_QOS_METERCFGSET, meter);
362 	if (ret)
363 		return ret;
364 
365 	priv->drop_meter = le16_to_cpu(meter.meter_id);
366 
367 	/* Select the meter instance for subsequent TCM register access. */
368 	reg.addr = cpu_to_le16(MXL862XX_TCM_INST_SEL);
369 	reg.data = cpu_to_le16(priv->drop_meter);
370 	reg.mask = cpu_to_le16(0xffff);
371 	ret = MXL862XX_API_WRITE(priv, MXL862XX_COMMON_REGISTERMOD, reg);
372 	if (ret)
373 		return ret;
374 
375 	/* Zero CBS so the committed bucket starts empty (exceeded). */
376 	reg.addr = cpu_to_le16(MXL862XX_TCM_CBS);
377 	reg.data = 0;
378 	ret = MXL862XX_API_WRITE(priv, MXL862XX_COMMON_REGISTERMOD, reg);
379 	if (ret)
380 		return ret;
381 
382 	/* Zero EBS so the excess bucket starts empty (exceeded). */
383 	reg.addr = cpu_to_le16(MXL862XX_TCM_EBS);
384 	return MXL862XX_API_WRITE(priv, MXL862XX_COMMON_REGISTERMOD, reg);
385 }
386 
/* Push the complete firmware bridge-port configuration for @port:
 * member port map, bridge/FID assignment, learning, Extended VLAN and
 * VLAN Filter block bindings, IVL/SVL mode and flood-blocking
 * sub-meters. Called whenever any of these per-port settings change.
 */
static int mxl862xx_set_bridge_port(struct dsa_switch *ds, int port)
{
	struct mxl862xx_bridge_port_config br_port_cfg = {};
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_port *p = &priv->ports[port];
	struct dsa_port *member_dp;
	u16 bridge_id;
	u16 vf_scan;
	bool enable;
	int i, idx;

	if (dsa_port_is_unused(dp))
		return 0;

	if (dsa_port_is_cpu(dp)) {
		/* CPU port forwards to every user port it serves. */
		dsa_switch_for_each_user_port(member_dp, ds) {
			if (member_dp->cpu_dp->index != port)
				continue;
			mxl862xx_fw_portmap_set_bit(br_port_cfg.bridge_port_map,
						    member_dp->index);
		}
	} else if (dp->bridge) {
		/* Bridged user port: other bridge members plus the CPU port. */
		dsa_switch_for_each_bridge_member(member_dp, ds,
						  dp->bridge->dev) {
			if (member_dp->index == port)
				continue;
			mxl862xx_fw_portmap_set_bit(br_port_cfg.bridge_port_map,
						    member_dp->index);
		}
		mxl862xx_fw_portmap_set_bit(br_port_cfg.bridge_port_map,
					    dp->cpu_dp->index);
	} else {
		/* Standalone user port: CPU port only; reset bridge-scoped
		 * per-port state.
		 */
		mxl862xx_fw_portmap_set_bit(br_port_cfg.bridge_port_map,
					    dp->cpu_dp->index);
		p->flood_block = 0;
		p->learning = false;
	}

	/* Standalone ports use their private FID as the firmware bridge. */
	bridge_id = dp->bridge ? priv->bridges[dp->bridge->num] : p->fid;

	br_port_cfg.bridge_port_id = cpu_to_le16(port);
	br_port_cfg.bridge_id = cpu_to_le16(bridge_id);
	br_port_cfg.mask = cpu_to_le32(MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_ID |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_MC_SRC_MAC_LEARNING |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_SUB_METER |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_VLAN |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_INGRESS_VLAN_FILTER |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN_FILTER1 |
				       MXL862XX_BRIDGE_PORT_CONFIG_MASK_VLAN_BASED_MAC_LEARNING);
	br_port_cfg.src_mac_learning_disable = !p->learning;

	/* Extended VLAN block assignments.
	 * Ingress: block_size is sent as-is (all entries are finals).
	 * Egress: n_active narrows the scan window to only the
	 * entries actually written by evlan_program_egress.
	 */
	br_port_cfg.ingress_extended_vlan_enable = p->ingress_evlan.in_use;
	br_port_cfg.ingress_extended_vlan_block_id =
		cpu_to_le16(p->ingress_evlan.block_id);
	br_port_cfg.ingress_extended_vlan_block_size =
		cpu_to_le16(p->ingress_evlan.block_size);
	br_port_cfg.egress_extended_vlan_enable = p->egress_evlan.in_use;
	br_port_cfg.egress_extended_vlan_block_id =
		cpu_to_le16(p->egress_evlan.block_id);
	br_port_cfg.egress_extended_vlan_block_size =
		cpu_to_le16(p->egress_evlan.n_active);

	/* VLAN Filter block assignments (per-port).
	 * The block_size sent to the firmware narrows the HW scan
	 * window to [block_id, block_id + active_count), relying on
	 * discard_unmatched_tagged for frames outside that range.
	 * When active_count=0, send 1 to scan only the DISCARD
	 * sentinel at index 0 (block_size=0 would disable narrowing
	 * and scan the entire allocated block).
	 *
	 * The bridge check ensures VF is disabled when the port
	 * leaves the bridge, without needing to prematurely clear
	 * vlan_filtering (which the DSA framework handles later via
	 * port_vlan_filtering).
	 */
	if (p->vf.allocated && p->vlan_filtering &&
	    dsa_port_bridge_dev_get(dp)) {
		vf_scan = max_t(u16, p->vf.active_count, 1);
		br_port_cfg.ingress_vlan_filter_enable = 1;
		br_port_cfg.ingress_vlan_filter_block_id =
			cpu_to_le16(p->vf.block_id);
		br_port_cfg.ingress_vlan_filter_block_size =
			cpu_to_le16(vf_scan);

		br_port_cfg.egress_vlan_filter1enable = 1;
		br_port_cfg.egress_vlan_filter1block_id =
			cpu_to_le16(p->vf.block_id);
		br_port_cfg.egress_vlan_filter1block_size =
			cpu_to_le16(vf_scan);
	} else {
		br_port_cfg.ingress_vlan_filter_enable = 0;
		br_port_cfg.egress_vlan_filter1enable = 0;
	}

	/* IVL when VLAN-aware: include VID in FDB lookup keys so that
	 * learned entries are per-VID. In VLAN-unaware mode, SVL is
	 * used (VID excluded from key).
	 */
	br_port_cfg.vlan_src_mac_vid_enable = p->vlan_filtering;
	br_port_cfg.vlan_dst_mac_vid_enable = p->vlan_filtering;

	/* Point each blocked flood class at the shared drop meter. */
	for (i = 0; i < ARRAY_SIZE(mxl862xx_flood_meters); i++) {
		idx = mxl862xx_flood_meters[i];
		enable = !!(p->flood_block & BIT(idx));

		br_port_cfg.egress_traffic_sub_meter_id[idx] =
			enable ? cpu_to_le16(priv->drop_meter) : 0;
		br_port_cfg.egress_sub_metering_enable[idx] = enable;
	}

	return MXL862XX_API_WRITE(priv, MXL862XX_BRIDGEPORT_CONFIGSET,
				  br_port_cfg);
}
508 
mxl862xx_sync_bridge_members(struct dsa_switch * ds,const struct dsa_bridge * bridge)509 static int mxl862xx_sync_bridge_members(struct dsa_switch *ds,
510 					const struct dsa_bridge *bridge)
511 {
512 	struct dsa_port *dp;
513 	int ret = 0, err;
514 
515 	dsa_switch_for_each_bridge_member(dp, ds, bridge->dev) {
516 		err = mxl862xx_set_bridge_port(ds, dp->index);
517 		if (err)
518 			ret = err;
519 	}
520 
521 	return ret;
522 }
523 
mxl862xx_evlan_block_alloc(struct mxl862xx_priv * priv,struct mxl862xx_evlan_block * blk)524 static int mxl862xx_evlan_block_alloc(struct mxl862xx_priv *priv,
525 				      struct mxl862xx_evlan_block *blk)
526 {
527 	struct mxl862xx_extendedvlan_alloc param = {};
528 	int ret;
529 
530 	param.number_of_entries = cpu_to_le16(blk->block_size);
531 
532 	ret = MXL862XX_API_READ(priv, MXL862XX_EXTENDEDVLAN_ALLOC, param);
533 	if (ret)
534 		return ret;
535 
536 	blk->block_id = le16_to_cpu(param.extended_vlan_block_id);
537 	blk->allocated = true;
538 
539 	return 0;
540 }
541 
/* Allocate a VLAN Filter block of @size entries. The block is set up
 * to admit untagged frames and discard tagged frames that match no
 * entry; the firmware-assigned id is returned through @block_id.
 */
static int mxl862xx_vf_block_alloc(struct mxl862xx_priv *priv,
				   u16 size, u16 *block_id)
{
	struct mxl862xx_vlanfilter_alloc req = {
		.number_of_entries = cpu_to_le16(size),
		.discard_untagged = 0,
		.discard_unmatched_tagged = 1,
	};
	int err;

	err = MXL862XX_API_READ(priv, MXL862XX_VLANFILTER_ALLOC, req);
	if (err)
		return err;

	*block_id = le16_to_cpu(req.vlan_filter_block_id);

	return 0;
}
559 
/* Write a VLAN Filter entry that matches VID 0 and discards the frame. */
static int mxl862xx_vf_entry_discard(struct mxl862xx_priv *priv,
				     u16 block_id, u16 index)
{
	struct mxl862xx_vlanfilter_config cfg = {
		.vlan_filter_block_id = cpu_to_le16(block_id),
		.entry_index = cpu_to_le16(index),
		.vlan_filter_mask = cpu_to_le32(MXL862XX_VLAN_FILTER_TCI_MASK_VID),
		.val = cpu_to_le32(0),
		.discard_matched = 1,
	};

	return MXL862XX_API_WRITE(priv, MXL862XX_VLANFILTER_SET, cfg);
}
573 
mxl862xx_vf_alloc(struct mxl862xx_priv * priv,struct mxl862xx_vf_block * vf)574 static int mxl862xx_vf_alloc(struct mxl862xx_priv *priv,
575 			     struct mxl862xx_vf_block *vf)
576 {
577 	int ret;
578 
579 	ret = mxl862xx_vf_block_alloc(priv, vf->block_size, &vf->block_id);
580 	if (ret)
581 		return ret;
582 
583 	vf->allocated = true;
584 	vf->active_count = 0;
585 
586 	/* Sentinel: block VID-0 when scan window covers only index 0 */
587 	return mxl862xx_vf_entry_discard(priv, vf->block_id, 0);
588 }
589 
mxl862xx_allocate_bridge(struct mxl862xx_priv * priv)590 static int mxl862xx_allocate_bridge(struct mxl862xx_priv *priv)
591 {
592 	struct mxl862xx_bridge_alloc br_alloc = {};
593 	int ret;
594 
595 	ret = MXL862XX_API_READ(priv, MXL862XX_BRIDGE_ALLOC, br_alloc);
596 	if (ret)
597 		return ret;
598 
599 	return le16_to_cpu(br_alloc.bridge_id);
600 }
601 
mxl862xx_free_bridge(struct dsa_switch * ds,const struct dsa_bridge * bridge)602 static void mxl862xx_free_bridge(struct dsa_switch *ds,
603 				 const struct dsa_bridge *bridge)
604 {
605 	struct mxl862xx_priv *priv = ds->priv;
606 	u16 fw_id = priv->bridges[bridge->num];
607 	struct mxl862xx_bridge_alloc br_alloc = {
608 		.bridge_id = cpu_to_le16(fw_id),
609 	};
610 	int ret;
611 
612 	ret = MXL862XX_API_WRITE(priv, MXL862XX_BRIDGE_FREE, br_alloc);
613 	if (ret) {
614 		dev_err(ds->dev, "failed to free fw bridge %u: %pe\n",
615 			fw_id, ERR_PTR(ret));
616 		return;
617 	}
618 
619 	priv->bridges[bridge->num] = 0;
620 }
621 
/* DSA .setup callback: reset the switch, wait for the firmware, size
 * the Extended VLAN / VLAN Filter resource budgets per user port,
 * create the shared drop meter, start stats polling and register the
 * relayed MDIO bus.
 */
static int mxl862xx_setup(struct dsa_switch *ds)
{
	struct mxl862xx_priv *priv = ds->priv;
	int n_user_ports = 0, max_vlans;
	int ingress_finals, vid_rules;
	struct dsa_port *dp;
	int ret;

	ret = mxl862xx_reset(priv);
	if (ret)
		return ret;

	ret = mxl862xx_wait_ready(ds);
	if (ret)
		return ret;

	/* Calculate Extended VLAN block sizes.
	 * With VLAN Filter handling VID membership checks:
	 *   Ingress: only final catchall rules (PVID insertion, 802.1Q
	 *            accept, non-8021Q TPID handling, discard).
	 *            Block sized to exactly fit the finals -- no per-VID
	 *            ingress EVLAN rules are needed. (7 entries.)
	 *   Egress:  2 rules per VID that needs tag stripping (untagged VIDs).
	 *            No egress final catchalls -- VLAN Filter does the discard.
	 *   CPU:     EVLAN is left disabled on CPU ports -- frames pass
	 *            through without EVLAN processing.
	 *
	 * Total EVLAN budget:
	 *   n_user_ports * (ingress + egress) <= 1024.
	 * Ingress blocks are small (7 entries), so almost all capacity
	 * goes to egress VID rules.
	 */
	dsa_switch_for_each_user_port(dp, ds)
		n_user_ports++;

	/* With zero user ports all sizes stay 0 (also avoids a divide
	 * by zero below).
	 */
	if (n_user_ports) {
		ingress_finals = ARRAY_SIZE(ingress_aware_final);
		vid_rules = ARRAY_SIZE(vid_accept_standard);

		/* Ingress block: fixed at finals count (7 entries) */
		priv->evlan_ingress_size = ingress_finals;

		/* Egress block: remaining budget divided equally among
		 * user ports. Each untagged VID needs vid_rules (2)
		 * EVLAN entries for tag stripping. Tagged-only VIDs
		 * need no EVLAN rules at all.
		 */
		max_vlans = (MXL862XX_TOTAL_EVLAN_ENTRIES -
			     n_user_ports * ingress_finals) /
			    (n_user_ports * vid_rules);
		priv->evlan_egress_size = vid_rules * max_vlans;

		/* VLAN Filter block: one per user port. The 1024-entry
		 * table is divided equally among user ports. Each port
		 * gets its own VF block for per-port VID membership --
		 * discard_unmatched_tagged handles the rest.
		 */
		priv->vf_block_size = MXL862XX_TOTAL_VF_ENTRIES / n_user_ports;
	}

	ret = mxl862xx_setup_drop_meter(ds);
	if (ret)
		return ret;

	schedule_delayed_work(&priv->stats_work,
			      MXL862XX_STATS_POLL_INTERVAL);

	return mxl862xx_setup_mdio(ds);
}
691 
/* Enable or disable a port's traffic by toggling the EN bit in its
 * SDMA and FDMA port-control registers (SDMA first, matching the
 * original programming order).
 */
static int mxl862xx_port_state(struct dsa_switch *ds, int port, bool enable)
{
	struct mxl862xx_register_mod mods[2];
	int i, err;

	mods[0].addr = cpu_to_le16(MXL862XX_SDMA_PCTRLP(port));
	mods[0].data = cpu_to_le16(enable ? MXL862XX_SDMA_PCTRL_EN : 0);
	mods[0].mask = cpu_to_le16(MXL862XX_SDMA_PCTRL_EN);

	mods[1].addr = cpu_to_le16(MXL862XX_FDMA_PCTRLP(port));
	mods[1].data = cpu_to_le16(enable ? MXL862XX_FDMA_PCTRL_EN : 0);
	mods[1].mask = cpu_to_le16(MXL862XX_FDMA_PCTRL_EN);

	for (i = 0; i < 2; i++) {
		err = MXL862XX_API_WRITE(ds->priv, MXL862XX_COMMON_REGISTERMOD,
					 mods[i]);
		if (err)
			return err;
	}

	return 0;
}
712 
/* DSA .port_enable callback: turn the port's DMA paths on. @phydev is
 * unused; the PHY is managed through the relayed MDIO bus.
 */
static int mxl862xx_port_enable(struct dsa_switch *ds, int port,
				struct phy_device *phydev)
{
	return mxl862xx_port_state(ds, port, true);
}
718 
mxl862xx_port_disable(struct dsa_switch * ds,int port)719 static void mxl862xx_port_disable(struct dsa_switch *ds, int port)
720 {
721 	if (mxl862xx_port_state(ds, port, false))
722 		dev_err(ds->dev, "failed to disable port %d\n", port);
723 }
724 
mxl862xx_port_fast_age(struct dsa_switch * ds,int port)725 static void mxl862xx_port_fast_age(struct dsa_switch *ds, int port)
726 {
727 	struct mxl862xx_mac_table_clear param = {
728 		.type = MXL862XX_MAC_CLEAR_PHY_PORT,
729 		.port_id = port,
730 	};
731 
732 	if (MXL862XX_API_WRITE(ds->priv, MXL862XX_MAC_TABLECLEARCOND, param))
733 		dev_err(ds->dev, "failed to clear fdb on port %d\n", port);
734 }
735 
/* Assign a range of CTP (connectivity termination point) ports to the
 * logical port @port, in Ethernet mode.
 */
static int mxl862xx_configure_ctp_port(struct dsa_switch *ds, int port,
				       u16 first_ctp_port_id,
				       u16 number_of_ctp_ports)
{
	struct mxl862xx_ctp_port_assignment assign = {};

	assign.logical_port_id = port;
	assign.first_ctp_port_id = cpu_to_le16(first_ctp_port_id);
	assign.number_of_ctp_port = cpu_to_le16(number_of_ctp_ports);
	assign.mode = cpu_to_le32(MXL862XX_LOGICAL_PORT_ETHERNET);

	return MXL862XX_API_WRITE(ds->priv, MXL862XX_CTP_PORTASSIGNMENTSET,
				  assign);
}
750 
/* Configure special-tag handling on @port. Enabled (CPU-facing) ports
 * keep the tag in both directions; on disabled ports the hardware
 * inserts the tag on rx and removes it on tx.
 */
static int mxl862xx_configure_sp_tag_proto(struct dsa_switch *ds, int port,
					   bool enable)
{
	struct mxl862xx_ss_sp_tag tag = {
		.pid = port,
		.mask = MXL862XX_SS_SP_TAG_MASK_RX | MXL862XX_SS_SP_TAG_MASK_TX,
	};

	if (enable) {
		tag.rx = MXL862XX_SS_SP_TAG_RX_TAG_NO_INSERT;
		tag.tx = MXL862XX_SS_SP_TAG_TX_TAG_NO_REMOVE;
	} else {
		tag.rx = MXL862XX_SS_SP_TAG_RX_NO_TAG_INSERT;
		tag.tx = MXL862XX_SS_SP_TAG_TX_TAG_REMOVE;
	}

	return MXL862XX_API_WRITE(ds->priv, MXL862XX_SS_SPTAG_SET, tag);
}
765 
/* Fill the treatment for the PVID-oriented actions: when the port has
 * a PVID, insert it as a fresh outer 802.1Q tag (optionally stripping
 * one existing tag first); without a PVID the frame is discarded
 * upstream. Factored out of mxl862xx_evlan_write_rule() where the two
 * PVID actions previously duplicated this code verbatim except for the
 * remove_tag value.
 */
static void mxl862xx_evlan_pvid_treatment(struct mxl862xx_extendedvlan_config *cfg,
					  u16 pvid, bool strip_one)
{
	if (!pvid) {
		cfg->treatment.remove_tag =
			cpu_to_le32(MXL862XX_EXTENDEDVLAN_TREATMENT_DISCARD_UPSTREAM);
		return;
	}

	cfg->treatment.remove_tag = cpu_to_le32(strip_one ?
		MXL862XX_EXTENDEDVLAN_TREATMENT_REMOVE_1_TAG :
		MXL862XX_EXTENDEDVLAN_TREATMENT_NOT_REMOVE_TAG);
	cfg->treatment.add_outer_vlan = 1;
	cfg->treatment.outer_vlan.vid_mode =
		cpu_to_le32(MXL862XX_EXTENDEDVLAN_TREATMENT_VID_VAL);
	cfg->treatment.outer_vlan.vid_val = cpu_to_le32(pvid);
	cfg->treatment.outer_vlan.tpid =
		cpu_to_le32(MXL862XX_EXTENDEDVLAN_TREATMENT_8021Q);
}

/* Program one Extended VLAN entry from the rule template @desc.
 * @vid: VID to match when desc->match_vid is set.
 * @untagged: for EVLAN_STRIP_IF_UNTAGGED, whether the tag should be
 *            stripped (the VID is egress-untagged on this port).
 * @pvid: port PVID to insert for the PVID actions; 0 means discard.
 */
static int mxl862xx_evlan_write_rule(struct mxl862xx_priv *priv,
				     u16 block_id, u16 entry_index,
				     const struct mxl862xx_evlan_rule_desc *desc,
				     u16 vid, bool untagged, u16 pvid)
{
	struct mxl862xx_extendedvlan_config cfg = {};
	struct mxl862xx_extendedvlan_filter_vlan *fv;

	cfg.extended_vlan_block_id = cpu_to_le16(block_id);
	cfg.entry_index = cpu_to_le16(entry_index);

	/* Populate filter from the rule template */
	cfg.filter.outer_vlan.type = cpu_to_le32(desc->outer_type);
	cfg.filter.inner_vlan.type = cpu_to_le32(desc->inner_type);
	cfg.filter.outer_vlan.tpid = cpu_to_le32(desc->outer_tpid);
	cfg.filter.inner_vlan.tpid = cpu_to_le32(desc->inner_tpid);

	if (desc->match_vid) {
		/* For egress unaware: outer=NO_FILTER, match on inner tag */
		if (desc->outer_type == FT_NO_FILTER)
			fv = &cfg.filter.inner_vlan;
		else
			fv = &cfg.filter.outer_vlan;

		fv->vid_enable = 1;
		fv->vid_val = cpu_to_le32(vid);
	}

	/* Populate treatment based on action */
	switch (desc->action) {
	case EVLAN_ACCEPT:
		cfg.treatment.remove_tag =
			cpu_to_le32(MXL862XX_EXTENDEDVLAN_TREATMENT_NOT_REMOVE_TAG);
		break;

	case EVLAN_STRIP_IF_UNTAGGED:
		cfg.treatment.remove_tag = cpu_to_le32(untagged ?
			MXL862XX_EXTENDEDVLAN_TREATMENT_REMOVE_1_TAG :
			MXL862XX_EXTENDEDVLAN_TREATMENT_NOT_REMOVE_TAG);
		break;

	case EVLAN_PVID_OR_DISCARD:
		mxl862xx_evlan_pvid_treatment(&cfg, pvid, false);
		break;

	case EVLAN_STRIP1_AND_PVID_OR_DISCARD:
		mxl862xx_evlan_pvid_treatment(&cfg, pvid, true);
		break;
	}

	return MXL862XX_API_WRITE(priv, MXL862XX_EXTENDEDVLAN_SET, cfg);
}
842 
/* Neutralize one Extended VLAN table entry.
 *
 * The DEFAULT+DEFAULT filter pair cannot match live traffic, and the
 * DISCARD treatment drops anything should the firmware consult the
 * entry anyway. Simply zeroing the entry is not safe: an all-zero
 * config carries a NORMAL+NORMAL filter, which matches real
 * double-tagged frames and passes them through.
 */
static int mxl862xx_evlan_deactivate_entry(struct mxl862xx_priv *priv,
					   u16 block_id, u16 entry_index)
{
	struct mxl862xx_extendedvlan_config cfg = {
		.extended_vlan_block_id = cpu_to_le16(block_id),
		.entry_index = cpu_to_le16(entry_index),
		.filter.outer_vlan.type =
			cpu_to_le32(MXL862XX_EXTENDEDVLAN_FILTER_TYPE_DEFAULT),
		.filter.inner_vlan.type =
			cpu_to_le32(MXL862XX_EXTENDEDVLAN_FILTER_TYPE_DEFAULT),
		.treatment.remove_tag =
			cpu_to_le32(MXL862XX_EXTENDEDVLAN_TREATMENT_DISCARD_UPSTREAM),
	};

	return MXL862XX_API_WRITE(priv, MXL862XX_EXTENDEDVLAN_SET, cfg);
}
864 
/* Write the catch-all rule set into the tail of an EVLAN block.
 *
 * The final rules occupy the last @n_rules slots of the block; they
 * are written in order and the first failure aborts the sequence.
 */
static int mxl862xx_evlan_write_final_rules(struct mxl862xx_priv *priv,
					    struct mxl862xx_evlan_block *blk,
					    const struct mxl862xx_evlan_rule_desc *rules,
					    int n_rules, u16 pvid)
{
	u16 base = blk->block_size - n_rules;
	int ret = 0;
	int i;

	for (i = 0; !ret && i < n_rules; i++)
		ret = mxl862xx_evlan_write_rule(priv, blk->block_id, base + i,
						&rules[i], 0, false, pvid);

	return ret;
}
883 
/* Program one ALLOW entry in a VLAN Filter block: frames whose VID
 * matches @vid pass (discard_matched stays zero from the initializer).
 */
static int mxl862xx_vf_entry_set(struct mxl862xx_priv *priv,
				 u16 block_id, u16 index, u16 vid)
{
	struct mxl862xx_vlanfilter_config cfg = {
		.vlan_filter_block_id = cpu_to_le16(block_id),
		.entry_index = cpu_to_le16(index),
		.vlan_filter_mask = cpu_to_le32(MXL862XX_VLAN_FILTER_TCI_MASK_VID),
		.val = cpu_to_le32(vid),
	};

	return MXL862XX_API_WRITE(priv, MXL862XX_VLANFILTER_SET, cfg);
}
897 
/* Look up the tracking entry for @vid in a VLAN Filter block.
 * Returns NULL when the VID is not tracked.
 */
static struct mxl862xx_vf_vid *mxl862xx_vf_find_vid(struct mxl862xx_vf_block *vf,
						    u16 vid)
{
	struct mxl862xx_vf_vid *cur, *found = NULL;

	list_for_each_entry(cur, &vf->vids, list) {
		if (cur->vid == vid) {
			found = cur;
			break;
		}
	}

	return found;
}
909 
/* Add @vid to a port's VLAN Filter block (or refresh its untagged
 * flag when already present). New entries are appended at the next
 * free hardware slot; the tracking node is only linked in after the
 * hardware write succeeds, so software and hardware state stay in
 * sync on failure.
 */
static int mxl862xx_vf_add_vid(struct mxl862xx_priv *priv,
			       struct mxl862xx_vf_block *vf,
			       u16 vid, bool untagged)
{
	struct mxl862xx_vf_vid *ve = mxl862xx_vf_find_vid(vf, vid);
	int ret;

	/* Idempotent update path */
	if (ve) {
		ve->untagged = untagged;
		return 0;
	}

	if (vf->active_count >= vf->block_size)
		return -ENOSPC;

	ve = kzalloc_obj(*ve);
	if (!ve)
		return -ENOMEM;

	ve->vid = vid;
	ve->untagged = untagged;
	ve->index = vf->active_count;

	/* Program hardware first; track the entry only on success */
	ret = mxl862xx_vf_entry_set(priv, vf->block_id, ve->index, vid);
	if (ret) {
		kfree(ve);
	} else {
		list_add_tail(&ve->list, &vf->vids);
		vf->active_count++;
	}

	return ret;
}
945 
/* Remove @vid from a port's VLAN Filter block.
 *
 * Active entries are kept densely packed at the start of the block:
 * deleting a middle entry moves the last ALLOW entry into the freed
 * slot ("swap with last"), and deleting the final remaining entry
 * restores the DISCARD sentinel at index 0. Returns 0 when the VID
 * was never tracked (idempotent), or a negative error code from the
 * firmware writes.
 */
static int mxl862xx_vf_del_vid(struct mxl862xx_priv *priv,
			       struct mxl862xx_vf_block *vf, u16 vid)
{
	struct mxl862xx_vf_vid *ve, *last_ve;
	u16 gap, last;
	int ret;

	ve = mxl862xx_vf_find_vid(vf, vid);
	if (!ve)
		return 0;

	if (!vf->allocated) {
		/* Software-only state -- just remove the tracking entry */
		list_del(&ve->list);
		kfree(ve);
		vf->active_count--;
		return 0;
	}

	gap = ve->index;		/* HW slot freed by this removal */
	last = vf->active_count - 1;	/* index of the current last slot */

	if (vf->active_count == 1) {
		/* Last VID -- restore DISCARD sentinel at index 0 */
		ret = mxl862xx_vf_entry_discard(priv, vf->block_id, 0);
		if (ret)
			return ret;
	} else if (gap < last) {
		/* Swap: move the last ALLOW entry into the gap */
		list_for_each_entry(last_ve, &vf->vids, list)
			if (last_ve->index == last)
				break;

		/* List inconsistent with active_count -- should not happen */
		if (WARN_ON(list_entry_is_head(last_ve, &vf->vids, list)))
			return -EINVAL;

		ret = mxl862xx_vf_entry_set(priv, vf->block_id,
					    gap, last_ve->vid);
		if (ret)
			return ret;

		last_ve->index = gap;
	}

	/* Hardware updated (or untouched when gap == last): drop tracking */
	list_del(&ve->list);
	kfree(ve);
	vf->active_count--;

	return 0;
}
996 
mxl862xx_evlan_program_ingress(struct mxl862xx_priv * priv,int port)997 static int mxl862xx_evlan_program_ingress(struct mxl862xx_priv *priv, int port)
998 {
999 	struct mxl862xx_port *p = &priv->ports[port];
1000 	struct mxl862xx_evlan_block *blk = &p->ingress_evlan;
1001 
1002 	if (!p->vlan_filtering)
1003 		return 0;
1004 
1005 	blk->in_use = true;
1006 	blk->n_active = blk->block_size;
1007 
1008 	return mxl862xx_evlan_write_final_rules(priv, blk,
1009 						ingress_aware_final,
1010 						ARRAY_SIZE(ingress_aware_final),
1011 						p->pvid);
1012 }
1013 
/* Rebuild the egress EVLAN rules for @port from its VLAN Filter VID
 * list.
 *
 * Each VID flagged untagged gets one or two tag-handling rules
 * (depending on whether the port is VLAN-aware) written at the front
 * of the block. Entries that were active before but are beyond the
 * newly written range are overwritten with unreachable DISCARD
 * entries, then the block's bookkeeping (n_active/in_use) is updated.
 * Returns -ENOSPC when the block cannot hold all rules, or a firmware
 * error code.
 */
static int mxl862xx_evlan_program_egress(struct mxl862xx_priv *priv, int port)
{
	struct mxl862xx_port *p = &priv->ports[port];
	struct mxl862xx_evlan_block *blk = &p->egress_evlan;
	const struct mxl862xx_evlan_rule_desc *vid_rules;
	struct mxl862xx_vf_vid *vfv;
	u16 old_active = blk->n_active;
	u16 idx = 0, i;
	int n_vid, ret;

	/* Rule-set selection depends on the port's VLAN awareness */
	if (p->vlan_filtering) {
		vid_rules = vid_accept_standard;
		n_vid = ARRAY_SIZE(vid_accept_standard);
	} else {
		vid_rules = vid_accept_egress_unaware;
		n_vid = ARRAY_SIZE(vid_accept_egress_unaware);
	}

	list_for_each_entry(vfv, &p->vf.vids, list) {
		/* Only untagged VIDs need egress tag stripping */
		if (!vfv->untagged)
			continue;

		if (idx + n_vid > blk->block_size)
			return -ENOSPC;

		ret = mxl862xx_evlan_write_rule(priv, blk->block_id,
						idx++, &vid_rules[0],
						vfv->vid, vfv->untagged,
						p->pvid);
		if (ret)
			return ret;

		if (n_vid > 1) {
			ret = mxl862xx_evlan_write_rule(priv, blk->block_id,
							idx++, &vid_rules[1],
							vfv->vid,
							vfv->untagged,
							p->pvid);
			if (ret)
				return ret;
		}
	}

	/* Deactivate stale entries that are no longer needed.
	 * This closes the brief window between writing the new rules
	 * and set_bridge_port narrowing the scan window.
	 */
	for (i = idx; i < old_active; i++) {
		ret = mxl862xx_evlan_deactivate_entry(priv,
						      blk->block_id,
						      i);
		if (ret)
			return ret;
	}

	blk->n_active = idx;
	blk->in_use = idx > 0;

	return 0;
}
1074 
/* DSA .port_vlan_filtering callback: switch a port between VLAN-aware
 * and VLAN-unaware operation.
 *
 * The software flag is updated first because the EVLAN programming
 * helpers consult it. On any programming failure the previous software
 * state is restored so a retry starts from a consistent view; the
 * hardware is deliberately not rolled back (see comment at the label).
 */
static int mxl862xx_port_vlan_filtering(struct dsa_switch *ds, int port,
					bool vlan_filtering,
					struct netlink_ext_ack *extack)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_port *p = &priv->ports[port];
	bool old_vlan_filtering = p->vlan_filtering;
	bool old_in_use = p->ingress_evlan.in_use;
	bool changed = (p->vlan_filtering != vlan_filtering);
	int ret;

	p->vlan_filtering = vlan_filtering;

	if (changed) {
		/* When leaving VLAN-aware mode, release the ingress HW
		 * block. The firmware passes frames through unchanged
		 * when no ingress EVLAN block is assigned, so the block
		 * is unnecessary in unaware mode.
		 */
		if (!vlan_filtering)
			p->ingress_evlan.in_use = false;

		ret = mxl862xx_evlan_program_ingress(priv, port);
		if (ret)
			goto err_restore;

		ret = mxl862xx_evlan_program_egress(priv, port);
		if (ret)
			goto err_restore;
	}

	/* Apply the (possibly updated) block assignments to the bridge port */
	return mxl862xx_set_bridge_port(ds, port);

	/* No HW rollback -- restoring SW state is sufficient for a correct retry. */
err_restore:
	p->vlan_filtering = old_vlan_filtering;
	p->ingress_evlan.in_use = old_in_use;
	return ret;
}
1114 
/* DSA .port_vlan_add callback: install @vlan on @port.
 *
 * Ordering matters: the VLAN Filter list is updated before the egress
 * EVLAN rules because mxl862xx_evlan_program_egress walks that list;
 * ingress rules are reprogrammed only when the PVID actually changed.
 * On failure everything is unwound best-effort so a retry converges.
 */
static int mxl862xx_port_vlan_add(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_vlan *vlan,
				  struct netlink_ext_ack *extack)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_port *p = &priv->ports[port];
	bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	u16 vid = vlan->vid;
	u16 old_pvid = p->pvid;
	bool pvid_changed = false;
	int ret;

	/* CPU port is VLAN-transparent: the SP tag handles port
	 * identification and the host-side DSA tagger manages VLAN
	 * delivery. Egress EVLAN catchalls are set up once in
	 * setup_cpu_bridge; no per-VID VF/EVLAN programming needed.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	/* Update PVID tracking */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
		if (p->pvid != vid) {
			p->pvid = vid;
			pvid_changed = true;
		}
	} else if (p->pvid == vid) {
		/* Re-adding the current PVID without the flag clears it */
		p->pvid = 0;
		pvid_changed = true;
	}

	/* Add/update VID in this port's VLAN Filter block.
	 * VF must be updated before programming egress EVLAN because
	 * evlan_program_egress walks the VF VID list.
	 */
	ret = mxl862xx_vf_add_vid(priv, &p->vf, vid, untagged);
	if (ret)
		goto err_pvid;

	/* Reprogram ingress finals if PVID changed */
	if (pvid_changed) {
		ret = mxl862xx_evlan_program_ingress(priv, port);
		if (ret)
			goto err_rollback;
	}

	/* Reprogram egress tag-stripping rules (walks VF VID list) */
	ret = mxl862xx_evlan_program_egress(priv, port);
	if (ret)
		goto err_rollback;

	/* Apply VLAN block IDs and MAC learning flags to bridge port */
	ret = mxl862xx_set_bridge_port(ds, port);
	if (ret)
		goto err_rollback;

	return 0;

err_rollback:
	/* Best-effort: undo VF add and restore consistent hardware state.
	 * A retry of port_vlan_add will converge since vf_add_vid is
	 * idempotent.
	 */
	p->pvid = old_pvid;
	mxl862xx_vf_del_vid(priv, &p->vf, vid);
	mxl862xx_evlan_program_ingress(priv, port);
	mxl862xx_evlan_program_egress(priv, port);
	mxl862xx_set_bridge_port(ds, port);
	return ret;
err_pvid:
	p->pvid = old_pvid;
	return ret;
}
1188 
/* DSA .port_vlan_del callback: remove @vlan from @port.
 *
 * Mirror image of port_vlan_add: the VID is dropped from the VLAN
 * Filter list before the egress EVLAN rules are rebuilt (which walk
 * that list), and ingress finals are rewritten only when the deleted
 * VID was the PVID. Failures are unwound best-effort.
 */
static int mxl862xx_port_vlan_del(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_vlan *vlan)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_port *p = &priv->ports[port];
	struct mxl862xx_vf_vid *ve;
	bool pvid_changed = false;
	u16 vid = vlan->vid;
	bool old_untagged;
	u16 old_pvid;
	int ret;

	/* CPU port carries no per-VID state (see port_vlan_add) */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	ve = mxl862xx_vf_find_vid(&p->vf, vid);
	if (!ve)
		return 0;
	/* Remember state needed for the rollback path before mutating */
	old_untagged = ve->untagged;
	old_pvid = p->pvid;

	/* Clear PVID if we're deleting it */
	if (p->pvid == vid) {
		p->pvid = 0;
		pvid_changed = true;
	}

	/* Remove VID from this port's VLAN Filter block.
	 * Must happen before egress reprogram so the VID is no
	 * longer in the list that evlan_program_egress walks.
	 */
	ret = mxl862xx_vf_del_vid(priv, &p->vf, vid);
	if (ret)
		goto err_pvid;

	/* Reprogram egress tag-stripping rules (VID is now gone) */
	ret = mxl862xx_evlan_program_egress(priv, port);
	if (ret)
		goto err_rollback;

	/* If PVID changed, reprogram ingress finals */
	if (pvid_changed) {
		ret = mxl862xx_evlan_program_ingress(priv, port);
		if (ret)
			goto err_rollback;
	}

	ret = mxl862xx_set_bridge_port(ds, port);
	if (ret)
		goto err_rollback;

	return 0;

err_rollback:
	/* Best-effort: re-add the VID and restore consistent hardware
	 * state. A retry of port_vlan_del will converge.
	 */
	p->pvid = old_pvid;
	mxl862xx_vf_add_vid(priv, &p->vf, vid, old_untagged);
	mxl862xx_evlan_program_egress(priv, port);
	mxl862xx_evlan_program_ingress(priv, port);
	mxl862xx_set_bridge_port(ds, port);
	return ret;
err_pvid:
	p->pvid = old_pvid;
	return ret;
}
1256 
mxl862xx_setup_cpu_bridge(struct dsa_switch * ds,int port)1257 static int mxl862xx_setup_cpu_bridge(struct dsa_switch *ds, int port)
1258 {
1259 	struct mxl862xx_priv *priv = ds->priv;
1260 	struct mxl862xx_port *p = &priv->ports[port];
1261 
1262 	p->fid = MXL862XX_DEFAULT_BRIDGE;
1263 	p->learning = true;
1264 
1265 	/* EVLAN is left disabled on CPU ports -- frames pass through
1266 	 * without EVLAN processing. Only the portmap and bridge
1267 	 * assignment need to be configured.
1268 	 */
1269 
1270 	return mxl862xx_set_bridge_port(ds, port);
1271 }
1272 
/* DSA .port_bridge_join callback.
 *
 * The first port joining a software bridge allocates a hardware FID
 * for it; subsequent joiners only resync the member set. The FID is
 * released here on sync failure because DSA's rollback will not.
 */
static int mxl862xx_port_bridge_join(struct dsa_switch *ds, int port,
				     const struct dsa_bridge bridge,
				     bool *tx_fwd_offload,
				     struct netlink_ext_ack *extack)
{
	struct mxl862xx_priv *priv = ds->priv;
	int ret;

	if (priv->bridges[bridge.num])
		return mxl862xx_sync_bridge_members(ds, &bridge);

	/* First member: back the software bridge with a hardware FID */
	ret = mxl862xx_allocate_bridge(priv);
	if (ret < 0)
		return ret;
	priv->bridges[bridge.num] = ret;

	ret = mxl862xx_sync_bridge_members(ds, &bridge);
	if (ret)
		mxl862xx_free_bridge(ds, &bridge);

	return ret;
}
1300 
mxl862xx_port_bridge_leave(struct dsa_switch * ds,int port,const struct dsa_bridge bridge)1301 static void mxl862xx_port_bridge_leave(struct dsa_switch *ds, int port,
1302 				       const struct dsa_bridge bridge)
1303 {
1304 	struct mxl862xx_priv *priv = ds->priv;
1305 	struct mxl862xx_port *p = &priv->ports[port];
1306 	int err;
1307 
1308 	err = mxl862xx_sync_bridge_members(ds, &bridge);
1309 	if (err)
1310 		dev_err(ds->dev,
1311 			"failed to sync bridge members after port %d left: %pe\n",
1312 			port, ERR_PTR(err));
1313 
1314 	/* Revert leaving port, omitted by the sync above, to its
1315 	 * single-port bridge
1316 	 */
1317 	p->pvid = 0;
1318 	p->ingress_evlan.in_use = false;
1319 	p->egress_evlan.in_use = false;
1320 
1321 	err = mxl862xx_set_bridge_port(ds, port);
1322 	if (err)
1323 		dev_err(ds->dev,
1324 			"failed to update bridge port %d state: %pe\n", port,
1325 			ERR_PTR(err));
1326 
1327 	if (!dsa_bridge_ports(ds, bridge.dev))
1328 		mxl862xx_free_bridge(ds, &bridge);
1329 }
1330 
/* DSA .port_setup callback: bring a port into a known-quiescent state
 * and allocate its per-port hardware resources.
 *
 * All ports are first disabled and flushed; unused ports stop there.
 * DSA-link ports are rejected (single-switch hardware). CPU ports get
 * the SP-tag/CTP configuration plus the shared default bridge; user
 * ports additionally receive a standalone single-port bridge, an
 * ingress and an egress EVLAN block, and a VLAN Filter block.
 * setup_done gates the deferred host-flood worker (see port_teardown).
 */
static int mxl862xx_port_setup(struct dsa_switch *ds, int port)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct dsa_port *dp = dsa_to_port(ds, port);
	bool is_cpu_port = dsa_port_is_cpu(dp);
	int ret;

	/* Start with the port disabled and its MAC table flushed */
	ret = mxl862xx_port_state(ds, port, false);
	if (ret)
		return ret;

	mxl862xx_port_fast_age(ds, port);

	if (dsa_port_is_unused(dp))
		return 0;

	if (dsa_port_is_dsa(dp)) {
		dev_err(ds->dev, "port %d: DSA links not supported\n", port);
		return -EOPNOTSUPP;
	}

	ret = mxl862xx_configure_sp_tag_proto(ds, port, is_cpu_port);
	if (ret)
		return ret;

	/* CPU ports aggregate all user ports' traffic: 32 - port queues;
	 * user ports get a single queue.
	 */
	ret = mxl862xx_configure_ctp_port(ds, port, port,
					  is_cpu_port ? 32 - port : 1);
	if (ret)
		return ret;

	if (is_cpu_port)
		return mxl862xx_setup_cpu_bridge(ds, port);

	/* setup single-port bridge for user ports.
	 * If this fails, the FID is leaked -- but the port then transitions
	 * to unused, and the FID pool is sized to tolerate this.
	 */
	ret = mxl862xx_allocate_bridge(priv);
	if (ret < 0) {
		dev_err(ds->dev, "failed to allocate a bridge for port %d\n", port);
		return ret;
	}
	priv->ports[port].fid = ret;
	/* Standalone ports should not flood unknown unicast or multicast
	 * towards the CPU by default; only broadcast is needed initially.
	 */
	ret = mxl862xx_bridge_config_fwd(ds, priv->ports[port].fid,
					 false, false, true);
	if (ret)
		return ret;
	ret = mxl862xx_set_bridge_port(ds, port);
	if (ret)
		return ret;

	priv->ports[port].ingress_evlan.block_size = priv->evlan_ingress_size;
	ret = mxl862xx_evlan_block_alloc(priv, &priv->ports[port].ingress_evlan);
	if (ret)
		return ret;

	priv->ports[port].egress_evlan.block_size = priv->evlan_egress_size;
	ret = mxl862xx_evlan_block_alloc(priv, &priv->ports[port].egress_evlan);
	if (ret)
		return ret;

	priv->ports[port].vf.block_size = priv->vf_block_size;
	INIT_LIST_HEAD(&priv->ports[port].vf.vids);
	ret = mxl862xx_vf_alloc(priv, &priv->ports[port].vf);
	if (ret)
		return ret;

	/* Only now may the deferred host-flood worker touch this port */
	priv->ports[port].setup_done = true;

	return 0;
}
1405 
mxl862xx_port_teardown(struct dsa_switch * ds,int port)1406 static void mxl862xx_port_teardown(struct dsa_switch *ds, int port)
1407 {
1408 	struct mxl862xx_priv *priv = ds->priv;
1409 	struct dsa_port *dp = dsa_to_port(ds, port);
1410 
1411 	if (dsa_port_is_unused(dp))
1412 		return;
1413 
1414 	/* Prevent deferred host_flood_work from acting on stale state.
1415 	 * The flag is checked under rtnl_lock() by the worker; since
1416 	 * teardown also runs under RTNL, this is race-free.
1417 	 *
1418 	 * HW EVLAN/VF blocks are not freed here -- the firmware receives
1419 	 * a full reset on the next probe, which reclaims all resources.
1420 	 */
1421 	priv->ports[port].setup_done = false;
1422 }
1423 
mxl862xx_phylink_get_caps(struct dsa_switch * ds,int port,struct phylink_config * config)1424 static void mxl862xx_phylink_get_caps(struct dsa_switch *ds, int port,
1425 				      struct phylink_config *config)
1426 {
1427 	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 |
1428 				   MAC_100 | MAC_1000 | MAC_2500FD;
1429 
1430 	__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1431 		  config->supported_interfaces);
1432 }
1433 
mxl862xx_get_fid(struct dsa_switch * ds,struct dsa_db db)1434 static int mxl862xx_get_fid(struct dsa_switch *ds, struct dsa_db db)
1435 {
1436 	struct mxl862xx_priv *priv = ds->priv;
1437 
1438 	switch (db.type) {
1439 	case DSA_DB_PORT:
1440 		return priv->ports[db.dp->index].fid;
1441 
1442 	case DSA_DB_BRIDGE:
1443 		if (!priv->bridges[db.bridge.num])
1444 			return -ENOENT;
1445 		return priv->bridges[db.bridge.num];
1446 
1447 	default:
1448 		return -EOPNOTSUPP;
1449 	}
1450 }
1451 
/* DSA .port_fdb_add callback: install a static unicast MAC table
 * entry for (@addr, @vid) pointing at @port in the FID that the DSA
 * database maps to.
 */
static int mxl862xx_port_fdb_add(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid, struct dsa_db db)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_mac_table_add param = {};
	int ret, fid;

	fid = mxl862xx_get_fid(ds, db);
	if (fid < 0)
		return fid;

	ether_addr_copy(param.mac, addr);
	param.fid = cpu_to_le16(fid);
	param.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, vid));
	param.port_id = cpu_to_le32(port);
	param.static_entry = true;

	ret = MXL862XX_API_WRITE(priv, MXL862XX_MAC_TABLEENTRYADD, param);
	if (ret)
		dev_err(ds->dev, "failed to add FDB entry on port %d\n", port);

	return ret;
}
1474 
/* DSA .port_fdb_del callback: remove the MAC table entry for
 * (@addr, @vid) from the FID that the DSA database maps to.
 */
static int mxl862xx_port_fdb_del(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid, const struct dsa_db db)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_mac_table_remove param = {};
	int ret, fid;

	fid = mxl862xx_get_fid(ds, db);
	if (fid < 0)
		return fid;

	ether_addr_copy(param.mac, addr);
	param.fid = cpu_to_le16(fid);
	param.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, vid));

	ret = MXL862XX_API_WRITE(priv, MXL862XX_MAC_TABLEENTRYREMOVE, param);
	if (ret)
		dev_err(ds->dev, "failed to remove FDB entry on port %d\n", port);

	return ret;
}
1495 
/* DSA .port_fdb_dump callback: iterate the firmware MAC table and
 * report entries belonging to @port through @cb.
 *
 * The first read sets .initial; every following iteration clears the
 * request structure, which the firmware interprets as "fetch next
 * entry". The firmware sets .last on the terminating read.
 */
static int mxl862xx_port_fdb_dump(struct dsa_switch *ds, int port,
				  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct mxl862xx_mac_table_read param = { .initial = 1 };
	struct mxl862xx_priv *priv = ds->priv;
	int ret;

	for (;;) {
		ret = MXL862XX_API_READ(priv, MXL862XX_MAC_TABLEENTRYREAD, param);
		if (ret)
			return ret;

		if (param.last)
			return 0;

		if (le32_to_cpu(param.port_id) == port) {
			ret = cb(param.mac,
				 FIELD_GET(MXL862XX_TCI_VLAN_ID,
					   le16_to_cpu(param.tci)),
				 param.static_entry, data);
			if (ret)
				return ret;
		}

		/* All-zero request continues the walk */
		memset(&param, 0, sizeof(param));
	}
}
1527 
mxl862xx_port_mdb_add(struct dsa_switch * ds,int port,const struct switchdev_obj_port_mdb * mdb,const struct dsa_db db)1528 static int mxl862xx_port_mdb_add(struct dsa_switch *ds, int port,
1529 				 const struct switchdev_obj_port_mdb *mdb,
1530 				 const struct dsa_db db)
1531 {
1532 	struct mxl862xx_mac_table_query qparam = {};
1533 	struct mxl862xx_mac_table_add aparam = {};
1534 	struct mxl862xx_priv *priv = ds->priv;
1535 	int fid, ret;
1536 
1537 	fid = mxl862xx_get_fid(ds, db);
1538 	if (fid < 0)
1539 		return fid;
1540 
1541 	ether_addr_copy(qparam.mac, mdb->addr);
1542 	qparam.fid = cpu_to_le16(fid);
1543 	qparam.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, mdb->vid));
1544 
1545 	ret = MXL862XX_API_READ(priv, MXL862XX_MAC_TABLEENTRYQUERY, qparam);
1546 	if (ret)
1547 		return ret;
1548 
1549 	/* Build the ADD command using portmap mode */
1550 	ether_addr_copy(aparam.mac, mdb->addr);
1551 	aparam.fid = cpu_to_le16(fid);
1552 	aparam.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, mdb->vid));
1553 	aparam.static_entry = true;
1554 	aparam.port_id = cpu_to_le32(MXL862XX_PORTMAP_FLAG);
1555 
1556 	if (qparam.found)
1557 		memcpy(aparam.port_map, qparam.port_map,
1558 		       sizeof(aparam.port_map));
1559 
1560 	mxl862xx_fw_portmap_set_bit(aparam.port_map, port);
1561 
1562 	return MXL862XX_API_WRITE(priv, MXL862XX_MAC_TABLEENTRYADD, aparam);
1563 }
1564 
mxl862xx_port_mdb_del(struct dsa_switch * ds,int port,const struct switchdev_obj_port_mdb * mdb,const struct dsa_db db)1565 static int mxl862xx_port_mdb_del(struct dsa_switch *ds, int port,
1566 				 const struct switchdev_obj_port_mdb *mdb,
1567 				 const struct dsa_db db)
1568 {
1569 	struct mxl862xx_mac_table_remove rparam = {};
1570 	struct mxl862xx_mac_table_query qparam = {};
1571 	struct mxl862xx_mac_table_add aparam = {};
1572 	int fid = mxl862xx_get_fid(ds, db), ret;
1573 	struct mxl862xx_priv *priv = ds->priv;
1574 
1575 	if (fid < 0)
1576 		return fid;
1577 
1578 	qparam.fid = cpu_to_le16(fid);
1579 	qparam.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, mdb->vid));
1580 	ether_addr_copy(qparam.mac, mdb->addr);
1581 
1582 	ret = MXL862XX_API_READ(priv, MXL862XX_MAC_TABLEENTRYQUERY, qparam);
1583 	if (ret)
1584 		return ret;
1585 
1586 	if (!qparam.found)
1587 		return 0;
1588 
1589 	mxl862xx_fw_portmap_clear_bit(qparam.port_map, port);
1590 
1591 	if (mxl862xx_fw_portmap_is_empty(qparam.port_map)) {
1592 		rparam.fid = cpu_to_le16(fid);
1593 		rparam.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, mdb->vid));
1594 		ether_addr_copy(rparam.mac, mdb->addr);
1595 		ret = MXL862XX_API_WRITE(priv, MXL862XX_MAC_TABLEENTRYREMOVE, rparam);
1596 	} else {
1597 		/* Write back with reduced portmap */
1598 		aparam.fid = cpu_to_le16(fid);
1599 		aparam.tci = cpu_to_le16(FIELD_PREP(MXL862XX_TCI_VLAN_ID, mdb->vid));
1600 		ether_addr_copy(aparam.mac, mdb->addr);
1601 		aparam.static_entry = true;
1602 		aparam.port_id = cpu_to_le32(MXL862XX_PORTMAP_FLAG);
1603 		memcpy(aparam.port_map, qparam.port_map, sizeof(aparam.port_map));
1604 		ret = MXL862XX_API_WRITE(priv, MXL862XX_MAC_TABLEENTRYADD, aparam);
1605 	}
1606 
1607 	return ret;
1608 }
1609 
mxl862xx_set_ageing_time(struct dsa_switch * ds,unsigned int msecs)1610 static int mxl862xx_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
1611 {
1612 	struct mxl862xx_cfg param = {};
1613 	int ret;
1614 
1615 	ret = MXL862XX_API_READ(ds->priv, MXL862XX_COMMON_CFGGET, param);
1616 	if (ret) {
1617 		dev_err(ds->dev, "failed to read switch config\n");
1618 		return ret;
1619 	}
1620 
1621 	param.mac_table_age_timer = cpu_to_le32(MXL862XX_AGETIMER_CUSTOM);
1622 	param.age_timer = cpu_to_le32(msecs / 1000);
1623 	ret = MXL862XX_API_WRITE(ds->priv, MXL862XX_COMMON_CFGSET, param);
1624 	if (ret)
1625 		dev_err(ds->dev, "failed to set ageing\n");
1626 
1627 	return ret;
1628 }
1629 
/* DSA .port_stp_state_set callback: translate the bridge STP state
 * into the firmware's port state and push it, then reassert the
 * driver's bridge-port configuration.
 */
static void mxl862xx_port_stp_state_set(struct dsa_switch *ds, int port,
					u8 state)
{
	struct mxl862xx_stp_port_cfg param = {
		.port_id = cpu_to_le16(port),
	};
	struct mxl862xx_priv *priv = ds->priv;
	u32 hw_state;
	int ret;

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = MXL862XX_STP_PORT_STATE_DISABLE;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		/* Both map to the firmware's single blocking state */
		hw_state = MXL862XX_STP_PORT_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		hw_state = MXL862XX_STP_PORT_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		hw_state = MXL862XX_STP_PORT_STATE_FORWARD;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}
	param.port_state = cpu_to_le32(hw_state);

	ret = MXL862XX_API_WRITE(priv, MXL862XX_STP_PORTCFGSET, param);
	if (ret) {
		dev_err(ds->dev, "failed to set STP state on port %d\n", port);
		return;
	}

	/* The firmware may re-enable MAC learning as a side-effect of entering
	 * LEARNING or FORWARDING state (per 802.1D defaults).
	 * Re-apply the driver's intended learning and metering config so that
	 * standalone ports keep learning disabled.
	 */
	ret = mxl862xx_set_bridge_port(ds, port);
	if (ret)
		dev_err(ds->dev, "failed to reapply brport flags on port %d\n",
			port);

	mxl862xx_port_fast_age(ds, port);
}
1676 
1677 /* Deferred work handler for host flood configuration.
1678  *
1679  * port_set_host_flood is called from atomic context (under
1680  * netif_addr_lock), so firmware calls must be deferred. The worker
1681  * acquires rtnl_lock() to serialize with DSA callbacks that access the
1682  * same driver state.
1683  */
mxl862xx_host_flood_work_fn(struct work_struct * work)1684 static void mxl862xx_host_flood_work_fn(struct work_struct *work)
1685 {
1686 	struct mxl862xx_port *p = container_of(work, struct mxl862xx_port,
1687 					       host_flood_work);
1688 	struct mxl862xx_priv *priv = p->priv;
1689 	struct dsa_switch *ds = priv->ds;
1690 
1691 	rtnl_lock();
1692 
1693 	/* Port may have been torn down between scheduling and now. */
1694 	if (!p->setup_done) {
1695 		rtnl_unlock();
1696 		return;
1697 	}
1698 
1699 	/* Always write to the standalone FID. When standalone it takes effect
1700 	 * immediately; when bridged the port uses the shared bridge FID so the
1701 	 * write is a no-op for current forwarding, but the state is preserved
1702 	 * in hardware and is ready once the port returns to standalone.
1703 	 */
1704 	mxl862xx_bridge_config_fwd(ds, p->fid, p->host_flood_uc,
1705 				   p->host_flood_mc, true);
1706 
1707 	rtnl_unlock();
1708 }
1709 
/* DSA .port_set_host_flood callback. Runs in atomic context, so only
 * record the requested state and let host_flood_work talk to the
 * firmware.
 */
static void mxl862xx_port_set_host_flood(struct dsa_switch *ds, int port,
					 bool uc, bool mc)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_port *p = &priv->ports[port];

	p->host_flood_mc = mc;
	p->host_flood_uc = uc;
	schedule_work(&p->host_flood_work);
}
1720 
mxl862xx_port_pre_bridge_flags(struct dsa_switch * ds,int port,const struct switchdev_brport_flags flags,struct netlink_ext_ack * extack)1721 static int mxl862xx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
1722 					  const struct switchdev_brport_flags flags,
1723 					  struct netlink_ext_ack *extack)
1724 {
1725 	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD |
1726 			   BR_LEARNING))
1727 		return -EINVAL;
1728 
1729 	return 0;
1730 }
1731 
mxl862xx_port_bridge_flags(struct dsa_switch * ds,int port,const struct switchdev_brport_flags flags,struct netlink_ext_ack * extack)1732 static int mxl862xx_port_bridge_flags(struct dsa_switch *ds, int port,
1733 				      const struct switchdev_brport_flags flags,
1734 				      struct netlink_ext_ack *extack)
1735 {
1736 	struct mxl862xx_priv *priv = ds->priv;
1737 	unsigned long old_block = priv->ports[port].flood_block;
1738 	unsigned long block = old_block;
1739 	int ret;
1740 
1741 	if (flags.mask & BR_FLOOD) {
1742 		if (flags.val & BR_FLOOD)
1743 			block &= ~BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_UC);
1744 		else
1745 			block |= BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_UC);
1746 	}
1747 
1748 	if (flags.mask & BR_MCAST_FLOOD) {
1749 		if (flags.val & BR_MCAST_FLOOD) {
1750 			block &= ~BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_IP);
1751 			block &= ~BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_NON_IP);
1752 		} else {
1753 			block |= BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_IP);
1754 			block |= BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_UNKNOWN_MC_NON_IP);
1755 		}
1756 	}
1757 
1758 	if (flags.mask & BR_BCAST_FLOOD) {
1759 		if (flags.val & BR_BCAST_FLOOD)
1760 			block &= ~BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_BROADCAST);
1761 		else
1762 			block |= BIT(MXL862XX_BRIDGE_PORT_EGRESS_METER_BROADCAST);
1763 	}
1764 
1765 	if (flags.mask & BR_LEARNING)
1766 		priv->ports[port].learning = !!(flags.val & BR_LEARNING);
1767 
1768 	if (block != old_block || (flags.mask & BR_LEARNING)) {
1769 		priv->ports[port].flood_block = block;
1770 		ret = mxl862xx_set_bridge_port(ds, port);
1771 		if (ret)
1772 			return ret;
1773 	}
1774 
1775 	return 0;
1776 }
1777 
/* .get_strings -- emit the name of every driver-private MIB counter. */
static void mxl862xx_get_strings(struct dsa_switch *ds, int port,
				 u32 stringset, u8 *data)
{
	const struct mxl862xx_mib_desc *mib;

	if (stringset != ETH_SS_STATS)
		return;

	for (mib = mxl862xx_mib;
	     mib < mxl862xx_mib + ARRAY_SIZE(mxl862xx_mib); mib++)
		ethtool_puts(&data, mib->name);
}
1789 
mxl862xx_get_sset_count(struct dsa_switch * ds,int port,int sset)1790 static int mxl862xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
1791 {
1792 	if (sset != ETH_SS_STATS)
1793 		return 0;
1794 
1795 	return ARRAY_SIZE(mxl862xx_mib);
1796 }
1797 
mxl862xx_read_rmon(struct dsa_switch * ds,int port,struct mxl862xx_rmon_port_cnt * cnt)1798 static int mxl862xx_read_rmon(struct dsa_switch *ds, int port,
1799 			      struct mxl862xx_rmon_port_cnt *cnt)
1800 {
1801 	memset(cnt, 0, sizeof(*cnt));
1802 	cnt->port_type = cpu_to_le32(MXL862XX_CTP_PORT);
1803 	cnt->port_id = cpu_to_le16(port);
1804 
1805 	return MXL862XX_API_READ(ds->priv, MXL862XX_RMON_PORT_GET, *cnt);
1806 }
1807 
/* .get_ethtool_stats -- copy the driver-private MIB counters into @data,
 * in the same order as mxl862xx_get_strings().
 */
static void mxl862xx_get_ethtool_stats(struct dsa_switch *ds, int port,
				       u64 *data)
{
	struct mxl862xx_rmon_port_cnt cnt;
	size_t i;

	if (mxl862xx_read_rmon(ds, port, &cnt)) {
		dev_err(ds->dev, "failed to read RMON stats on port %d\n", port);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(mxl862xx_mib); i++) {
		/* size == 1 marks a 32-bit counter, otherwise 64-bit. */
		const void *field = (const u8 *)&cnt + mxl862xx_mib[i].offset;

		data[i] = mxl862xx_mib[i].size == 1 ?
			  le32_to_cpu(*(const __le32 *)field) :
			  le64_to_cpu(*(const __le64 *)field);
	}
}
1832 
mxl862xx_get_eth_mac_stats(struct dsa_switch * ds,int port,struct ethtool_eth_mac_stats * mac_stats)1833 static void mxl862xx_get_eth_mac_stats(struct dsa_switch *ds, int port,
1834 				       struct ethtool_eth_mac_stats *mac_stats)
1835 {
1836 	struct mxl862xx_rmon_port_cnt cnt;
1837 
1838 	if (mxl862xx_read_rmon(ds, port, &cnt))
1839 		return;
1840 
1841 	mac_stats->FramesTransmittedOK = le32_to_cpu(cnt.tx_good_pkts);
1842 	mac_stats->SingleCollisionFrames = le32_to_cpu(cnt.tx_single_coll_count);
1843 	mac_stats->MultipleCollisionFrames = le32_to_cpu(cnt.tx_mult_coll_count);
1844 	mac_stats->FramesReceivedOK = le32_to_cpu(cnt.rx_good_pkts);
1845 	mac_stats->FrameCheckSequenceErrors = le32_to_cpu(cnt.rx_fcserror_pkts);
1846 	mac_stats->AlignmentErrors = le32_to_cpu(cnt.rx_align_error_pkts);
1847 	mac_stats->OctetsTransmittedOK = le64_to_cpu(cnt.tx_good_bytes);
1848 	mac_stats->LateCollisions = le32_to_cpu(cnt.tx_late_coll_count);
1849 	mac_stats->FramesAbortedDueToXSColls = le32_to_cpu(cnt.tx_excess_coll_count);
1850 	mac_stats->OctetsReceivedOK = le64_to_cpu(cnt.rx_good_bytes);
1851 	mac_stats->MulticastFramesXmittedOK = le32_to_cpu(cnt.tx_multicast_pkts);
1852 	mac_stats->BroadcastFramesXmittedOK = le32_to_cpu(cnt.tx_broadcast_pkts);
1853 	mac_stats->MulticastFramesReceivedOK = le32_to_cpu(cnt.rx_multicast_pkts);
1854 	mac_stats->BroadcastFramesReceivedOK = le32_to_cpu(cnt.rx_broadcast_pkts);
1855 	mac_stats->FrameTooLongErrors = le32_to_cpu(cnt.rx_oversize_error_pkts);
1856 }
1857 
mxl862xx_get_eth_ctrl_stats(struct dsa_switch * ds,int port,struct ethtool_eth_ctrl_stats * ctrl_stats)1858 static void mxl862xx_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
1859 					struct ethtool_eth_ctrl_stats *ctrl_stats)
1860 {
1861 	struct mxl862xx_rmon_port_cnt cnt;
1862 
1863 	if (mxl862xx_read_rmon(ds, port, &cnt))
1864 		return;
1865 
1866 	ctrl_stats->MACControlFramesTransmitted = le32_to_cpu(cnt.tx_pause_count);
1867 	ctrl_stats->MACControlFramesReceived = le32_to_cpu(cnt.rx_good_pause_pkts);
1868 }
1869 
mxl862xx_get_pause_stats(struct dsa_switch * ds,int port,struct ethtool_pause_stats * pause_stats)1870 static void mxl862xx_get_pause_stats(struct dsa_switch *ds, int port,
1871 				     struct ethtool_pause_stats *pause_stats)
1872 {
1873 	struct mxl862xx_rmon_port_cnt cnt;
1874 
1875 	if (mxl862xx_read_rmon(ds, port, &cnt))
1876 		return;
1877 
1878 	pause_stats->tx_pause_frames = le32_to_cpu(cnt.tx_pause_count);
1879 	pause_stats->rx_pause_frames = le32_to_cpu(cnt.rx_good_pause_pkts);
1880 }
1881 
mxl862xx_get_rmon_stats(struct dsa_switch * ds,int port,struct ethtool_rmon_stats * rmon_stats,const struct ethtool_rmon_hist_range ** ranges)1882 static void mxl862xx_get_rmon_stats(struct dsa_switch *ds, int port,
1883 				    struct ethtool_rmon_stats *rmon_stats,
1884 				    const struct ethtool_rmon_hist_range **ranges)
1885 {
1886 	struct mxl862xx_rmon_port_cnt cnt;
1887 
1888 	if (mxl862xx_read_rmon(ds, port, &cnt))
1889 		return;
1890 
1891 	rmon_stats->undersize_pkts = le32_to_cpu(cnt.rx_under_size_good_pkts);
1892 	rmon_stats->oversize_pkts = le32_to_cpu(cnt.rx_oversize_good_pkts);
1893 	rmon_stats->fragments = le32_to_cpu(cnt.rx_under_size_error_pkts);
1894 	rmon_stats->jabbers = le32_to_cpu(cnt.rx_oversize_error_pkts);
1895 
1896 	rmon_stats->hist[0] = le32_to_cpu(cnt.rx64byte_pkts);
1897 	rmon_stats->hist[1] = le32_to_cpu(cnt.rx127byte_pkts);
1898 	rmon_stats->hist[2] = le32_to_cpu(cnt.rx255byte_pkts);
1899 	rmon_stats->hist[3] = le32_to_cpu(cnt.rx511byte_pkts);
1900 	rmon_stats->hist[4] = le32_to_cpu(cnt.rx1023byte_pkts);
1901 	rmon_stats->hist[5] = le32_to_cpu(cnt.rx_max_byte_pkts);
1902 
1903 	rmon_stats->hist_tx[0] = le32_to_cpu(cnt.tx64byte_pkts);
1904 	rmon_stats->hist_tx[1] = le32_to_cpu(cnt.tx127byte_pkts);
1905 	rmon_stats->hist_tx[2] = le32_to_cpu(cnt.tx255byte_pkts);
1906 	rmon_stats->hist_tx[3] = le32_to_cpu(cnt.tx511byte_pkts);
1907 	rmon_stats->hist_tx[4] = le32_to_cpu(cnt.tx1023byte_pkts);
1908 	rmon_stats->hist_tx[5] = le32_to_cpu(cnt.tx_max_byte_pkts);
1909 
1910 	*ranges = mxl862xx_rmon_ranges;
1911 }
1912 
1913 /* Compute the delta between two 32-bit free-running counter snapshots,
1914  * handling a single wrap-around correctly via unsigned subtraction.
1915  */
mxl862xx_delta32(u32 cur,u32 prev)1916 static u64 mxl862xx_delta32(u32 cur, u32 prev)
1917 {
1918 	return (u32)(cur - prev);
1919 }
1920 
/**
 * mxl862xx_stats_poll - Read RMON counters and accumulate into 64-bit stats
 * @ds: DSA switch
 * @port: port index
 *
 * The firmware RMON counters are free-running 32-bit values (64-bit for
 * byte counters). This function reads the hardware via MDIO (may sleep),
 * computes deltas from the previous snapshot, and accumulates them into
 * 64-bit per-port stats under a spinlock.
 *
 * Called only from the stats polling workqueue -- serialized by the
 * single-threaded delayed_work, so no MDIO locking is needed here.
 *
 * On a failed hardware read the accumulators and the previous snapshot
 * are left untouched; the next successful poll catches up.
 */
static void mxl862xx_stats_poll(struct dsa_switch *ds, int port)
{
	struct mxl862xx_priv *priv = ds->priv;
	struct mxl862xx_port_stats *s = &priv->ports[port].stats;
	u32 rx_fcserr, rx_under, rx_over, rx_align, tx_drop;
	u32 rx_drop, rx_evlan, mtu_exc, tx_acm;
	struct mxl862xx_rmon_port_cnt cnt;
	u64 rx_bytes, tx_bytes;
	u32 rx_mcast, tx_coll;
	u32 rx_pkts, tx_pkts;

	/* MDIO read -- may sleep, done outside the spinlock. */
	if (mxl862xx_read_rmon(ds, port, &cnt))
		return;

	/* Decode the full counter snapshot before taking the lock. */
	rx_pkts   = le32_to_cpu(cnt.rx_good_pkts);
	tx_pkts   = le32_to_cpu(cnt.tx_good_pkts);
	rx_bytes  = le64_to_cpu(cnt.rx_good_bytes);
	tx_bytes  = le64_to_cpu(cnt.tx_good_bytes);
	rx_fcserr = le32_to_cpu(cnt.rx_fcserror_pkts);
	rx_under  = le32_to_cpu(cnt.rx_under_size_error_pkts);
	rx_over   = le32_to_cpu(cnt.rx_oversize_error_pkts);
	rx_align  = le32_to_cpu(cnt.rx_align_error_pkts);
	tx_drop   = le32_to_cpu(cnt.tx_dropped_pkts);
	rx_drop   = le32_to_cpu(cnt.rx_dropped_pkts);
	rx_evlan  = le32_to_cpu(cnt.rx_extended_vlan_discard_pkts);
	mtu_exc   = le32_to_cpu(cnt.mtu_exceed_discard_pkts);
	tx_acm    = le32_to_cpu(cnt.tx_acm_dropped_pkts);
	rx_mcast  = le32_to_cpu(cnt.rx_multicast_pkts);
	tx_coll   = le32_to_cpu(cnt.tx_coll_count);

	/* Accumulate deltas under spinlock -- .get_stats64 reads these. */
	spin_lock_bh(&priv->ports[port].stats_lock);

	s->rx_packets += mxl862xx_delta32(rx_pkts, s->prev_rx_good_pkts);
	s->tx_packets += mxl862xx_delta32(tx_pkts, s->prev_tx_good_pkts);
	/* Byte counters are 64-bit in hardware; plain subtraction suffices. */
	s->rx_bytes   += rx_bytes - s->prev_rx_good_bytes;
	s->tx_bytes   += tx_bytes - s->prev_tx_good_bytes;

	s->rx_errors +=
		mxl862xx_delta32(rx_fcserr, s->prev_rx_fcserror_pkts) +
		mxl862xx_delta32(rx_under, s->prev_rx_under_size_error_pkts) +
		mxl862xx_delta32(rx_over, s->prev_rx_oversize_error_pkts) +
		mxl862xx_delta32(rx_align, s->prev_rx_align_error_pkts);
	s->tx_errors +=
		mxl862xx_delta32(tx_drop, s->prev_tx_dropped_pkts);

	/* NOTE(review): tx_drop is accumulated into both tx_errors and
	 * tx_dropped below -- confirm this double-accounting is intended.
	 */
	s->rx_dropped +=
		mxl862xx_delta32(rx_drop, s->prev_rx_dropped_pkts) +
		mxl862xx_delta32(rx_evlan, s->prev_rx_evlan_discard_pkts) +
		mxl862xx_delta32(mtu_exc, s->prev_mtu_exceed_discard_pkts);
	s->tx_dropped +=
		mxl862xx_delta32(tx_drop, s->prev_tx_dropped_pkts) +
		mxl862xx_delta32(tx_acm, s->prev_tx_acm_dropped_pkts);

	s->multicast  += mxl862xx_delta32(rx_mcast, s->prev_rx_multicast_pkts);
	s->collisions += mxl862xx_delta32(tx_coll, s->prev_tx_coll_count);

	s->rx_length_errors +=
		mxl862xx_delta32(rx_under, s->prev_rx_under_size_error_pkts) +
		mxl862xx_delta32(rx_over, s->prev_rx_oversize_error_pkts);
	s->rx_crc_errors +=
		mxl862xx_delta32(rx_fcserr, s->prev_rx_fcserror_pkts);
	s->rx_frame_errors +=
		mxl862xx_delta32(rx_align, s->prev_rx_align_error_pkts);

	/* Save this snapshot as the baseline for the next poll. */
	s->prev_rx_good_pkts             = rx_pkts;
	s->prev_tx_good_pkts             = tx_pkts;
	s->prev_rx_good_bytes            = rx_bytes;
	s->prev_tx_good_bytes            = tx_bytes;
	s->prev_rx_fcserror_pkts         = rx_fcserr;
	s->prev_rx_under_size_error_pkts = rx_under;
	s->prev_rx_oversize_error_pkts   = rx_over;
	s->prev_rx_align_error_pkts      = rx_align;
	s->prev_tx_dropped_pkts          = tx_drop;
	s->prev_rx_dropped_pkts          = rx_drop;
	s->prev_rx_evlan_discard_pkts    = rx_evlan;
	s->prev_mtu_exceed_discard_pkts  = mtu_exc;
	s->prev_tx_acm_dropped_pkts      = tx_acm;
	s->prev_rx_multicast_pkts        = rx_mcast;
	s->prev_tx_coll_count            = tx_coll;

	spin_unlock_bh(&priv->ports[port].stats_lock);
}
2018 
mxl862xx_stats_work_fn(struct work_struct * work)2019 static void mxl862xx_stats_work_fn(struct work_struct *work)
2020 {
2021 	struct mxl862xx_priv *priv =
2022 		container_of(work, struct mxl862xx_priv, stats_work.work);
2023 	struct dsa_switch *ds = priv->ds;
2024 	struct dsa_port *dp;
2025 
2026 	dsa_switch_for_each_available_port(dp, ds)
2027 		mxl862xx_stats_poll(ds, dp->index);
2028 
2029 	if (!test_bit(MXL862XX_FLAG_WORK_STOPPED, &priv->flags))
2030 		schedule_delayed_work(&priv->stats_work,
2031 				      MXL862XX_STATS_POLL_INTERVAL);
2032 }
2033 
mxl862xx_get_stats64(struct dsa_switch * ds,int port,struct rtnl_link_stats64 * s)2034 static void mxl862xx_get_stats64(struct dsa_switch *ds, int port,
2035 				 struct rtnl_link_stats64 *s)
2036 {
2037 	struct mxl862xx_priv *priv = ds->priv;
2038 	struct mxl862xx_port_stats *ps = &priv->ports[port].stats;
2039 
2040 	spin_lock_bh(&priv->ports[port].stats_lock);
2041 
2042 	s->rx_packets = ps->rx_packets;
2043 	s->tx_packets = ps->tx_packets;
2044 	s->rx_bytes = ps->rx_bytes;
2045 	s->tx_bytes = ps->tx_bytes;
2046 	s->rx_errors = ps->rx_errors;
2047 	s->tx_errors = ps->tx_errors;
2048 	s->rx_dropped = ps->rx_dropped;
2049 	s->tx_dropped = ps->tx_dropped;
2050 	s->multicast = ps->multicast;
2051 	s->collisions = ps->collisions;
2052 	s->rx_length_errors = ps->rx_length_errors;
2053 	s->rx_crc_errors = ps->rx_crc_errors;
2054 	s->rx_frame_errors = ps->rx_frame_errors;
2055 
2056 	spin_unlock_bh(&priv->ports[port].stats_lock);
2057 
2058 	/* Trigger a fresh poll so the next read sees up-to-date counters.
2059 	 * No-op if the work is already pending, running, or teardown started.
2060 	 */
2061 	if (!test_bit(MXL862XX_FLAG_WORK_STOPPED, &priv->flags))
2062 		schedule_delayed_work(&priv->stats_work, 0);
2063 }
2064 
/* DSA switch operations implemented by this driver. Callbacks defined
 * earlier in the file (bridge/FDB/VLAN handling, ethtool statistics,
 * and the 64-bit accumulated stats) are wired up here.
 */
static const struct dsa_switch_ops mxl862xx_switch_ops = {
	.get_tag_protocol = mxl862xx_get_tag_protocol,
	.setup = mxl862xx_setup,
	.port_setup = mxl862xx_port_setup,
	.port_teardown = mxl862xx_port_teardown,
	.phylink_get_caps = mxl862xx_phylink_get_caps,
	.port_enable = mxl862xx_port_enable,
	.port_disable = mxl862xx_port_disable,
	.port_fast_age = mxl862xx_port_fast_age,
	.set_ageing_time = mxl862xx_set_ageing_time,
	.port_bridge_join = mxl862xx_port_bridge_join,
	.port_bridge_leave = mxl862xx_port_bridge_leave,
	.port_pre_bridge_flags = mxl862xx_port_pre_bridge_flags,
	.port_bridge_flags = mxl862xx_port_bridge_flags,
	.port_stp_state_set = mxl862xx_port_stp_state_set,
	.port_set_host_flood = mxl862xx_port_set_host_flood,
	.port_fdb_add = mxl862xx_port_fdb_add,
	.port_fdb_del = mxl862xx_port_fdb_del,
	.port_fdb_dump = mxl862xx_port_fdb_dump,
	.port_mdb_add = mxl862xx_port_mdb_add,
	.port_mdb_del = mxl862xx_port_mdb_del,
	.port_vlan_filtering = mxl862xx_port_vlan_filtering,
	.port_vlan_add = mxl862xx_port_vlan_add,
	.port_vlan_del = mxl862xx_port_vlan_del,
	.get_strings = mxl862xx_get_strings,
	.get_sset_count = mxl862xx_get_sset_count,
	.get_ethtool_stats = mxl862xx_get_ethtool_stats,
	.get_eth_mac_stats = mxl862xx_get_eth_mac_stats,
	.get_eth_ctrl_stats = mxl862xx_get_eth_ctrl_stats,
	.get_pause_stats = mxl862xx_get_pause_stats,
	.get_rmon_stats = mxl862xx_get_rmon_stats,
	.get_stats64 = mxl862xx_get_stats64,
};
2098 
/* Intentionally empty: this driver performs no per-MAC configuration.
 * NOTE(review): presumably the switch firmware manages the port MACs
 * itself -- confirm against the setup path before extending.
 */
static void mxl862xx_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
}
2104 
/* Intentionally empty: no driver action is taken on link down.
 * NOTE(review): assumed to be handled by the switch firmware -- verify.
 */
static void mxl862xx_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
}
2110 
/* Intentionally empty: speed/duplex/pause resolution is not programmed
 * from the host. NOTE(review): assumed to be handled by the switch
 * firmware -- verify.
 */
static void mxl862xx_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
}
2119 
/* phylink MAC operations; all three callbacks are no-op stubs above. */
static const struct phylink_mac_ops mxl862xx_phylink_mac_ops = {
	.mac_config = mxl862xx_phylink_mac_config,
	.mac_link_down = mxl862xx_phylink_mac_link_down,
	.mac_link_up = mxl862xx_phylink_mac_link_up,
};
2125 
mxl862xx_probe(struct mdio_device * mdiodev)2126 static int mxl862xx_probe(struct mdio_device *mdiodev)
2127 {
2128 	struct device *dev = &mdiodev->dev;
2129 	struct mxl862xx_priv *priv;
2130 	struct dsa_switch *ds;
2131 	int err, i;
2132 
2133 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2134 	if (!priv)
2135 		return -ENOMEM;
2136 
2137 	priv->mdiodev = mdiodev;
2138 
2139 	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
2140 	if (!ds)
2141 		return -ENOMEM;
2142 
2143 	priv->ds = ds;
2144 	ds->dev = dev;
2145 	ds->priv = priv;
2146 	ds->ops = &mxl862xx_switch_ops;
2147 	ds->phylink_mac_ops = &mxl862xx_phylink_mac_ops;
2148 	ds->num_ports = MXL862XX_MAX_PORTS;
2149 	ds->fdb_isolation = true;
2150 	ds->max_num_bridges = MXL862XX_MAX_BRIDGES;
2151 
2152 	mxl862xx_host_init(priv);
2153 
2154 	for (i = 0; i < MXL862XX_MAX_PORTS; i++) {
2155 		priv->ports[i].priv = priv;
2156 		INIT_WORK(&priv->ports[i].host_flood_work,
2157 			  mxl862xx_host_flood_work_fn);
2158 		spin_lock_init(&priv->ports[i].stats_lock);
2159 	}
2160 
2161 	INIT_DELAYED_WORK(&priv->stats_work, mxl862xx_stats_work_fn);
2162 
2163 	dev_set_drvdata(dev, ds);
2164 
2165 	err = dsa_register_switch(ds);
2166 	if (err) {
2167 		set_bit(MXL862XX_FLAG_WORK_STOPPED, &priv->flags);
2168 		cancel_delayed_work_sync(&priv->stats_work);
2169 		mxl862xx_host_shutdown(priv);
2170 		for (i = 0; i < MXL862XX_MAX_PORTS; i++)
2171 			cancel_work_sync(&priv->ports[i].host_flood_work);
2172 	}
2173 
2174 	return err;
2175 }
2176 
mxl862xx_remove(struct mdio_device * mdiodev)2177 static void mxl862xx_remove(struct mdio_device *mdiodev)
2178 {
2179 	struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
2180 	struct mxl862xx_priv *priv;
2181 	int i;
2182 
2183 	if (!ds)
2184 		return;
2185 
2186 	priv = ds->priv;
2187 
2188 	set_bit(MXL862XX_FLAG_WORK_STOPPED, &priv->flags);
2189 	cancel_delayed_work_sync(&priv->stats_work);
2190 
2191 	dsa_unregister_switch(ds);
2192 
2193 	mxl862xx_host_shutdown(priv);
2194 
2195 	/* Cancel any pending host flood work. dsa_unregister_switch()
2196 	 * has already called port_teardown (which sets setup_done=false),
2197 	 * but a worker could still be blocked on rtnl_lock(). Since we
2198 	 * are now outside RTNL, cancel_work_sync() will not deadlock.
2199 	 */
2200 	for (i = 0; i < MXL862XX_MAX_PORTS; i++)
2201 		cancel_work_sync(&priv->ports[i].host_flood_work);
2202 }
2203 
/* System shutdown: quiesce the DSA framework first, then stop the stats
 * poller and the host interface, and flush the per-port workers.
 * Clearing drvdata at the end makes a subsequent mxl862xx_remove() call
 * return early instead of tearing down twice.
 */
static void mxl862xx_shutdown(struct mdio_device *mdiodev)
{
	struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
	struct mxl862xx_priv *priv;
	int i;

	if (!ds)
		return;

	priv = ds->priv;

	dsa_switch_shutdown(ds);

	/* Stop the stats poller before shutting the host interface down. */
	set_bit(MXL862XX_FLAG_WORK_STOPPED, &priv->flags);
	cancel_delayed_work_sync(&priv->stats_work);

	mxl862xx_host_shutdown(priv);

	for (i = 0; i < MXL862XX_MAX_PORTS; i++)
		cancel_work_sync(&priv->ports[i].host_flood_work);

	dev_set_drvdata(&mdiodev->dev, NULL);
}
2227 
/* Both supported switch variants are handled by this one driver. */
static const struct of_device_id mxl862xx_of_match[] = {
	{ .compatible = "maxlinear,mxl86282" },
	{ .compatible = "maxlinear,mxl86252" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxl862xx_of_match);

/* The switch is attached as a device on an MDIO bus. */
static struct mdio_driver mxl862xx_driver = {
	.probe  = mxl862xx_probe,
	.remove = mxl862xx_remove,
	.shutdown = mxl862xx_shutdown,
	.mdiodrv.driver = {
		.name = "mxl862xx",
		.of_match_table = mxl862xx_of_match,
	},
};

mdio_module_driver(mxl862xx_driver);

MODULE_DESCRIPTION("Driver for MaxLinear MxL862xx switch family");
MODULE_LICENSE("GPL");
2249