1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/slab.h>
11 #include <linux/device.h>
12 #include <linux/skbuff.h>
13 #include <linux/if_vlan.h>
14 
15 #include "pci.h"
16 #include "core.h"
17 #include "reg.h"
18 #include "port.h"
19 #include "trap.h"
20 #include "txheader.h"
21 #include "ib.h"
22 
23 static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
24 static const char mlxsw_sx_driver_version[] = "1.0";
25 
26 struct mlxsw_sx_port;
27 
28 struct mlxsw_sx {
29 	struct mlxsw_sx_port **ports;
30 	struct mlxsw_core *core;
31 	const struct mlxsw_bus_info *bus_info;
32 	u8 hw_id[ETH_ALEN];
33 };
34 
35 struct mlxsw_sx_port_pcpu_stats {
36 	u64			rx_packets;
37 	u64			rx_bytes;
38 	u64			tx_packets;
39 	u64			tx_bytes;
40 	struct u64_stats_sync	syncp;
41 	u32			tx_dropped;
42 };
43 
44 struct mlxsw_sx_port {
45 	struct net_device *dev;
46 	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
47 	struct mlxsw_sx *mlxsw_sx;
48 	u8 local_port;
49 	struct {
50 		u8 module;
51 	} mapping;
52 };
53 
54 /* tx_hdr_version
55  * Tx header version.
56  * Must be set to 0.
57  */
58 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
59 
60 /* tx_hdr_ctl
61  * Packet control type.
62  * 0 - Ethernet control (e.g. EMADs, LACP)
63  * 1 - Ethernet data
64  */
65 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
66 
67 /* tx_hdr_proto
68  * Packet protocol type. Must be set to 1 (Ethernet).
69  */
70 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
71 
72 /* tx_hdr_etclass
73  * Egress TClass to be used on the egress device on the egress port.
74  * The MSB is specified in the 'ctclass3' field.
75  * Range is 0-15, where 15 is the highest priority.
76  */
77 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
78 
79 /* tx_hdr_swid
80  * Switch partition ID.
81  */
82 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
83 
84 /* tx_hdr_port_mid
85  * Destination local port for unicast packets.
86  * Destination multicast ID for multicast packets.
87  *
88  * Control packets are directed to a specific egress port, while data
89  * packets are transmitted through the CPU port (0) into the switch partition,
90  * where forwarding rules are applied.
91  */
92 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
93 
94 /* tx_hdr_ctclass3
95  * See field 'etclass'.
96  */
97 MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
98 
99 /* tx_hdr_rdq
100  * RDQ for control packets sent to remote CPU.
101  * Must be set to 0x1F for EMADs, otherwise 0.
102  */
103 MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
104 
105 /* tx_hdr_cpu_sig
106  * Signature control for packets going to CPU. Must be set to 0.
107  */
108 MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
109 
110 /* tx_hdr_sig
111  * Stacking protocol signature. Must be set to 0xE0E0.
112  */
113 MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
114 
115 /* tx_hdr_stclass
116  * Stacking TClass.
117  */
118 MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
119 
120 /* tx_hdr_emad
121  * EMAD bit. Must be set for EMADs.
122  */
123 MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
124 
125 /* tx_hdr_type
126  * 0 - Data packets
127  * 6 - Control packets
128  */
129 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
130 
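/* Prepend the device Tx header (see the tx_hdr_* items above) to the skb.
 * EMAD frames get a dedicated egress tclass and RDQ; all other frames use
 * the default control-packet values.
 */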
131 static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
132 				     const struct mlxsw_tx_info *tx_info)
133 {
134 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
135 	bool is_emad = tx_info->is_emad;
136 
137 	memset(txhdr, 0, MLXSW_TXHDR_LEN);
138 
139 	/* We currently set default values for the egress tclass (QoS). */
140 	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
141 	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
142 	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
143 	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
144 						  MLXSW_TXHDR_ETCLASS_5);
145 	mlxsw_tx_hdr_swid_set(txhdr, 0);
146 	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
147 	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
148 	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
149 					      MLXSW_TXHDR_RDQ_OTHER);
150 	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
151 	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
152 	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
153 	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
154 					       MLXSW_TXHDR_NOT_EMAD);
155 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
156 }
157 
158 static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
159 					  bool is_up)
160 {
161 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
162 	char paos_pl[MLXSW_REG_PAOS_LEN];
163 
164 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
165 			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
166 			    MLXSW_PORT_ADMIN_STATUS_DOWN);
167 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
168 }
169 
170 static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
171 					 bool *p_is_up)
172 {
173 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
174 	char paos_pl[MLXSW_REG_PAOS_LEN];
175 	u8 oper_status;
176 	int err;
177 
178 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
179 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
180 	if (err)
181 		return err;
182 	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
183 	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
184 	return 0;
185 }
186 
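/* Query the maximal MTU supported by the port (PMTU register) and reject
 * anything larger before programming the requested value.
 */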
187 static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
188 				   u16 mtu)
189 {
190 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
191 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
192 	int max_mtu;
193 	int err;
194 
195 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
196 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
197 	if (err)
198 		return err;
199 	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
200 
201 	if (mtu > max_mtu)
202 		return -EINVAL;
203 
204 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
205 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
206 }
207 
208 static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
209 				     u16 mtu)
210 {
211 	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
212 	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
213 }
214 
215 static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
216 				    u16 mtu)
217 {
218 	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
219 }
220 
221 static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
222 				     u8 ib_port)
223 {
224 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
225 	char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
226 	int err;
227 
228 	mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
229 	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
230 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
231 	return err;
232 }
233 
234 static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
235 {
236 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
237 	char pspa_pl[MLXSW_REG_PSPA_LEN];
238 
239 	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
240 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
241 }
242 
243 static int
244 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
245 {
246 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
247 	char sspr_pl[MLXSW_REG_SSPR_LEN];
248 
249 	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
250 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
251 }
252 
253 static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
254 					 u8 local_port, u8 *p_module,
255 					 u8 *p_width)
256 {
257 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
258 	int err;
259 
260 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
261 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
262 	if (err)
263 		return err;
264 	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
265 	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
266 	return 0;
267 }
268 
269 static int mlxsw_sx_port_open(struct net_device *dev)
270 {
271 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
272 	int err;
273 
274 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
275 	if (err)
276 		return err;
277 	netif_start_queue(dev);
278 	return 0;
279 }
280 
281 static int mlxsw_sx_port_stop(struct net_device *dev)
282 {
283 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
284 
285 	netif_stop_queue(dev);
286 	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
287 }
288 
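/* Transmit path: reserve headroom for the Tx header, hand the skb to the
 * core for emission and account the result in the per-CPU stats. The Tx
 * header bytes are not counted as transmitted payload.
 */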
289 static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
290 				      struct net_device *dev)
291 {
292 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
293 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
294 	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
295 	const struct mlxsw_tx_info tx_info = {
296 		.local_port = mlxsw_sx_port->local_port,
297 		.is_emad = false,
298 	};
299 	u64 len;
300 	int err;
301 
302 	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
303 		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
304 		dev_kfree_skb_any(skb);
305 		return NETDEV_TX_OK;
306 	}
307 
308 	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
309 
310 	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
311 		return NETDEV_TX_BUSY;
312 
313 	mlxsw_sx_txhdr_construct(skb, &tx_info);
314 	/* TX header is consumed by HW on the way so we shouldn't count its
315 	 * bytes as being sent.
316 	 */
317 	len = skb->len - MLXSW_TXHDR_LEN;
318 	/* Due to a race we might fail here because of a full queue. In that
319 	 * unlikely case we simply drop the packet.
320 	 */
321 	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
322 
323 	if (!err) {
324 		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
325 		u64_stats_update_begin(&pcpu_stats->syncp);
326 		pcpu_stats->tx_packets++;
327 		pcpu_stats->tx_bytes += len;
328 		u64_stats_update_end(&pcpu_stats->syncp);
329 	} else {
330 		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
331 		dev_kfree_skb_any(skb);
332 	}
333 	return NETDEV_TX_OK;
334 }
335 
336 static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
337 {
338 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
339 	int err;
340 
341 	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
342 	if (err)
343 		return err;
344 	dev->mtu = mtu;
345 	return 0;
346 }
347 
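/* Sum the per-CPU counters into the rtnl_link_stats64 structure reported
 * to the stack.
 */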
348 static void
349 mlxsw_sx_port_get_stats64(struct net_device *dev,
350 			  struct rtnl_link_stats64 *stats)
351 {
352 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
353 	struct mlxsw_sx_port_pcpu_stats *p;
354 	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
355 	u32 tx_dropped = 0;
356 	unsigned int start;
357 	int i;
358 
359 	for_each_possible_cpu(i) {
360 		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
361 		do {
362 			start = u64_stats_fetch_begin_irq(&p->syncp);
363 			rx_packets	= p->rx_packets;
364 			rx_bytes	= p->rx_bytes;
365 			tx_packets	= p->tx_packets;
366 			tx_bytes	= p->tx_bytes;
367 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
368 
369 		stats->rx_packets	+= rx_packets;
370 		stats->rx_bytes		+= rx_bytes;
371 		stats->tx_packets	+= tx_packets;
372 		stats->tx_bytes		+= tx_bytes;
373 		/* tx_dropped is u32, updated without syncp protection. */
374 		tx_dropped	+= p->tx_dropped;
375 	}
376 	stats->tx_dropped	= tx_dropped;
377 }
378 
379 static struct devlink_port *
380 mlxsw_sx_port_get_devlink_port(struct net_device *dev)
381 {
382 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
383 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
384 
385 	return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
386 						mlxsw_sx_port->local_port);
387 }
388 
389 static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
390 	.ndo_open		= mlxsw_sx_port_open,
391 	.ndo_stop		= mlxsw_sx_port_stop,
392 	.ndo_start_xmit		= mlxsw_sx_port_xmit,
393 	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
394 	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
395 	.ndo_get_devlink_port	= mlxsw_sx_port_get_devlink_port,
396 };
397 
398 static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
399 				      struct ethtool_drvinfo *drvinfo)
400 {
401 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
402 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
403 
404 	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
405 	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
406 		sizeof(drvinfo->version));
407 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
408 		 "%d.%d.%d",
409 		 mlxsw_sx->bus_info->fw_rev.major,
410 		 mlxsw_sx->bus_info->fw_rev.minor,
411 		 mlxsw_sx->bus_info->fw_rev.subminor);
412 	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
413 		sizeof(drvinfo->bus_info));
414 }
415 
416 struct mlxsw_sx_port_hw_stats {
417 	char str[ETH_GSTRING_LEN];
418 	u64 (*getter)(const char *payload);
419 };
420 
421 static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
422 	{
423 		.str = "a_frames_transmitted_ok",
424 		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
425 	},
426 	{
427 		.str = "a_frames_received_ok",
428 		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
429 	},
430 	{
431 		.str = "a_frame_check_sequence_errors",
432 		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
433 	},
434 	{
435 		.str = "a_alignment_errors",
436 		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
437 	},
438 	{
439 		.str = "a_octets_transmitted_ok",
440 		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
441 	},
442 	{
443 		.str = "a_octets_received_ok",
444 		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
445 	},
446 	{
447 		.str = "a_multicast_frames_xmitted_ok",
448 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
449 	},
450 	{
451 		.str = "a_broadcast_frames_xmitted_ok",
452 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
453 	},
454 	{
455 		.str = "a_multicast_frames_received_ok",
456 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
457 	},
458 	{
459 		.str = "a_broadcast_frames_received_ok",
460 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
461 	},
462 	{
463 		.str = "a_in_range_length_errors",
464 		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
465 	},
466 	{
467 		.str = "a_out_of_range_length_field",
468 		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
469 	},
470 	{
471 		.str = "a_frame_too_long_errors",
472 		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
473 	},
474 	{
475 		.str = "a_symbol_error_during_carrier",
476 		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
477 	},
478 	{
479 		.str = "a_mac_control_frames_transmitted",
480 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
481 	},
482 	{
483 		.str = "a_mac_control_frames_received",
484 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
485 	},
486 	{
487 		.str = "a_unsupported_opcodes_received",
488 		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
489 	},
490 	{
491 		.str = "a_pause_mac_ctrl_frames_received",
492 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
493 	},
494 	{
495 		.str = "a_pause_mac_ctrl_frames_xmitted",
496 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
497 	},
498 };
499 
500 #define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
501 
502 static void mlxsw_sx_port_get_strings(struct net_device *dev,
503 				      u32 stringset, u8 *data)
504 {
505 	u8 *p = data;
506 	int i;
507 
508 	switch (stringset) {
509 	case ETH_SS_STATS:
510 		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
511 			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
512 			       ETH_GSTRING_LEN);
513 			p += ETH_GSTRING_LEN;
514 		}
515 		break;
516 	}
517 }
518 
519 static void mlxsw_sx_port_get_stats(struct net_device *dev,
520 				    struct ethtool_stats *stats, u64 *data)
521 {
522 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
523 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
524 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
525 	int i;
526 	int err;
527 
528 	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
529 			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
530 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
531 	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
532 		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
533 }
534 
535 static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
536 {
537 	switch (sset) {
538 	case ETH_SS_STATS:
539 		return MLXSW_SX_PORT_HW_STATS_LEN;
540 	default:
541 		return -EOPNOTSUPP;
542 	}
543 }
544 
545 struct mlxsw_sx_port_link_mode {
546 	u32 mask;
547 	u32 supported;
548 	u32 advertised;
549 	u32 speed;
550 };
551 
552 static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
553 	{
554 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
555 				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
556 		.supported	= SUPPORTED_1000baseKX_Full,
557 		.advertised	= ADVERTISED_1000baseKX_Full,
558 		.speed		= 1000,
559 	},
560 	{
561 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
562 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
563 		.supported	= SUPPORTED_10000baseKX4_Full,
564 		.advertised	= ADVERTISED_10000baseKX4_Full,
565 		.speed		= 10000,
566 	},
567 	{
568 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
569 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
570 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
571 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
572 		.supported	= SUPPORTED_10000baseKR_Full,
573 		.advertised	= ADVERTISED_10000baseKR_Full,
574 		.speed		= 10000,
575 	},
576 	{
577 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
578 		.supported	= SUPPORTED_40000baseCR4_Full,
579 		.advertised	= ADVERTISED_40000baseCR4_Full,
580 		.speed		= 40000,
581 	},
582 	{
583 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
584 		.supported	= SUPPORTED_40000baseKR4_Full,
585 		.advertised	= ADVERTISED_40000baseKR4_Full,
586 		.speed		= 40000,
587 	},
588 	{
589 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
590 		.supported	= SUPPORTED_40000baseSR4_Full,
591 		.advertised	= ADVERTISED_40000baseSR4_Full,
592 		.speed		= 40000,
593 	},
594 	{
595 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
596 		.supported	= SUPPORTED_40000baseLR4_Full,
597 		.advertised	= ADVERTISED_40000baseLR4_Full,
598 		.speed		= 40000,
599 	},
600 	{
601 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
602 				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
603 				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
604 		.speed		= 25000,
605 	},
606 	{
607 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
608 				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
609 				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
610 		.speed		= 50000,
611 	},
612 	{
613 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
614 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
615 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
616 		.speed		= 100000,
617 	},
618 };
619 
620 #define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
621 #define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
622 
623 static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
624 {
625 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
626 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
627 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
628 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
629 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
630 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
631 		return SUPPORTED_FIBRE;
632 
633 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
634 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
635 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
636 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
637 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
638 		return SUPPORTED_Backplane;
639 	return 0;
640 }
641 
642 static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
643 {
644 	u32 modes = 0;
645 	int i;
646 
647 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
648 		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
649 			modes |= mlxsw_sx_port_link_mode[i].supported;
650 	}
651 	return modes;
652 }
653 
654 static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
655 {
656 	u32 modes = 0;
657 	int i;
658 
659 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
660 		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
661 			modes |= mlxsw_sx_port_link_mode[i].advertised;
662 	}
663 	return modes;
664 }
665 
666 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
667 					    struct ethtool_link_ksettings *cmd)
668 {
669 	u32 speed = SPEED_UNKNOWN;
670 	u8 duplex = DUPLEX_UNKNOWN;
671 	int i;
672 
673 	if (!carrier_ok)
674 		goto out;
675 
676 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
677 		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
678 			speed = mlxsw_sx_port_link_mode[i].speed;
679 			duplex = DUPLEX_FULL;
680 			break;
681 		}
682 	}
683 out:
684 	cmd->base.speed = speed;
685 	cmd->base.duplex = duplex;
686 }
687 
688 static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
689 {
690 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
691 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
692 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
693 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
694 		return PORT_FIBRE;
695 
696 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
697 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
698 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
699 		return PORT_DA;
700 
701 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
702 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
703 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
704 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
705 		return PORT_NONE;
706 
707 	return PORT_OTHER;
708 }
709 
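/* ethtool get_link_ksettings: translate the PTYS capability, admin and
 * operational masks into legacy supported/advertising/lp_advertising
 * bitmaps and the current speed/duplex.
 */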
710 static int
711 mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
712 				 struct ethtool_link_ksettings *cmd)
713 {
714 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
715 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
716 	char ptys_pl[MLXSW_REG_PTYS_LEN];
717 	u32 eth_proto_cap;
718 	u32 eth_proto_admin;
719 	u32 eth_proto_oper;
720 	u32 supported, advertising, lp_advertising;
721 	int err;
722 
723 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
724 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
725 	if (err) {
726 		netdev_err(dev, "Failed to get proto");
727 		return err;
728 	}
729 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
730 				  &eth_proto_admin, &eth_proto_oper);
731 
732 	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
733 			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
734 			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
735 	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
736 	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
737 					eth_proto_oper, cmd);
738 
739 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
740 	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
741 	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
742 
743 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
744 						supported);
745 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
746 						advertising);
747 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
748 						lp_advertising);
749 
750 	return 0;
751 }
752 
753 static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
754 {
755 	u32 ptys_proto = 0;
756 	int i;
757 
758 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
759 		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
760 			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
761 	}
762 	return ptys_proto;
763 }
764 
765 static u32 mlxsw_sx_to_ptys_speed(u32 speed)
766 {
767 	u32 ptys_proto = 0;
768 	int i;
769 
770 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
771 		if (speed == mlxsw_sx_port_link_mode[i].speed)
772 			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
773 	}
774 	return ptys_proto;
775 }
776 
777 static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
778 {
779 	u32 ptys_proto = 0;
780 	int i;
781 
782 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
783 		if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
784 			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
785 	}
786 	return ptys_proto;
787 }
788 
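/* ethtool set_link_ksettings: compute the new admin protocol mask from the
 * requested advertising bits (autoneg) or the forced speed, write it via
 * PTYS and toggle the admin state if the port is currently up so the new
 * setting takes effect.
 */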
789 static int
790 mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
791 				 const struct ethtool_link_ksettings *cmd)
792 {
793 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
794 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
795 	char ptys_pl[MLXSW_REG_PTYS_LEN];
796 	u32 speed;
797 	u32 eth_proto_new;
798 	u32 eth_proto_cap;
799 	u32 eth_proto_admin;
800 	u32 advertising;
801 	bool is_up;
802 	int err;
803 
804 	speed = cmd->base.speed;
805 
806 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
807 						cmd->link_modes.advertising);
808 
809 	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
810 		mlxsw_sx_to_ptys_advert_link(advertising) :
811 		mlxsw_sx_to_ptys_speed(speed);
812 
813 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
814 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
815 	if (err) {
816 		netdev_err(dev, "Failed to get proto");
817 		return err;
818 	}
819 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
820 				  NULL);
821 
822 	eth_proto_new = eth_proto_new & eth_proto_cap;
823 	if (!eth_proto_new) {
824 		netdev_err(dev, "Not supported proto admin requested");
825 		return -EINVAL;
826 	}
827 	if (eth_proto_new == eth_proto_admin)
828 		return 0;
829 
830 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
831 				eth_proto_new, true);
832 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
833 	if (err) {
834 		netdev_err(dev, "Failed to set proto admin");
835 		return err;
836 	}
837 
838 	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
839 	if (err) {
840 		netdev_err(dev, "Failed to get oper status");
841 		return err;
842 	}
843 	if (!is_up)
844 		return 0;
845 
846 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
847 	if (err) {
848 		netdev_err(dev, "Failed to set admin status");
849 		return err;
850 	}
851 
852 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
853 	if (err) {
854 		netdev_err(dev, "Failed to set admin status");
855 		return err;
856 	}
857 
858 	return 0;
859 }
860 
861 static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
862 	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
863 	.get_link		= ethtool_op_get_link,
864 	.get_strings		= mlxsw_sx_port_get_strings,
865 	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
866 	.get_sset_count		= mlxsw_sx_port_get_sset_count,
867 	.get_link_ksettings	= mlxsw_sx_port_get_link_ksettings,
868 	.set_link_ksettings	= mlxsw_sx_port_set_link_ksettings,
869 };
870 
871 static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
872 {
873 	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
874 	int err;
875 
876 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
877 	if (err)
878 		return err;
879 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
880 	return 0;
881 }
882 
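/* Derive the port MAC address from the switch base MAC (PPAD register) by
 * adding the local port number to the last byte.
 */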
883 static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
884 {
885 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
886 	struct net_device *dev = mlxsw_sx_port->dev;
887 	char ppad_pl[MLXSW_REG_PPAD_LEN];
888 	int err;
889 
890 	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
891 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
892 	if (err)
893 		return err;
894 	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
895 	/* The last byte value in the base MAC address is guaranteed
896 	 * to be such that it does not overflow when the local_port
897 	 * value is added.
898 	 */
899 	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
900 	return 0;
901 }
902 
903 static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
904 				       u16 vid, enum mlxsw_reg_spms_state state)
905 {
906 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
907 	char *spms_pl;
908 	int err;
909 
910 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
911 	if (!spms_pl)
912 		return -ENOMEM;
913 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
914 	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
915 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
916 	kfree(spms_pl);
917 	return err;
918 }
919 
920 static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
921 				      u16 speed, u16 width)
922 {
923 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
924 	char ptys_pl[MLXSW_REG_PTYS_LEN];
925 
926 	mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
927 			       width);
928 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
929 }
930 
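/* Advertise every Ethernet protocol whose speed fits within the port width
 * multiplied by the base lane speed (MLXSW_SX_PORT_BASE_SPEED).
 */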
931 static int
932 mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
933 {
934 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
935 	u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
936 	char ptys_pl[MLXSW_REG_PTYS_LEN];
937 	u32 eth_proto_admin;
938 
939 	eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
940 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
941 				eth_proto_admin, true);
942 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
943 }
944 
945 static int
946 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
947 				    enum mlxsw_reg_spmlr_learn_mode mode)
948 {
949 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
950 	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
951 
952 	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
953 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
954 }
955 
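/* Create an Ethernet netdev for a front-panel port and program the device
 * state it needs: system port mapping, SWID 0, speed, MTU, admin state,
 * STP forwarding on the default VID and disabled MAC learning.
 */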
956 static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
957 				      u8 module, u8 width)
958 {
959 	struct mlxsw_sx_port *mlxsw_sx_port;
960 	struct net_device *dev;
961 	int err;
962 
963 	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
964 	if (!dev)
965 		return -ENOMEM;
966 	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
967 	dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
968 	mlxsw_sx_port = netdev_priv(dev);
969 	mlxsw_sx_port->dev = dev;
970 	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
971 	mlxsw_sx_port->local_port = local_port;
972 	mlxsw_sx_port->mapping.module = module;
973 
974 	mlxsw_sx_port->pcpu_stats =
975 		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
976 	if (!mlxsw_sx_port->pcpu_stats) {
977 		err = -ENOMEM;
978 		goto err_alloc_stats;
979 	}
980 
981 	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
982 	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
983 
984 	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
985 	if (err) {
986 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
987 			mlxsw_sx_port->local_port);
988 		goto err_dev_addr_get;
989 	}
990 
991 	netif_carrier_off(dev);
992 
993 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
994 			 NETIF_F_VLAN_CHALLENGED;
995 
996 	dev->min_mtu = 0;
997 	dev->max_mtu = ETH_MAX_MTU;
998 
999 	/* Each packet needs to have a Tx header (metadata) on top of all other
1000 	 * headers.
1001 	 */
1002 	dev->needed_headroom = MLXSW_TXHDR_LEN;
1003 
1004 	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1005 	if (err) {
1006 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1007 			mlxsw_sx_port->local_port);
1008 		goto err_port_system_port_mapping_set;
1009 	}
1010 
1011 	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
1012 	if (err) {
1013 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1014 			mlxsw_sx_port->local_port);
1015 		goto err_port_swid_set;
1016 	}
1017 
1018 	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
1019 	if (err) {
1020 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1021 			mlxsw_sx_port->local_port);
1022 		goto err_port_speed_set;
1023 	}
1024 
1025 	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
1026 	if (err) {
1027 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1028 			mlxsw_sx_port->local_port);
1029 		goto err_port_mtu_set;
1030 	}
1031 
1032 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1033 	if (err)
1034 		goto err_port_admin_status_set;
1035 
1036 	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
1037 					  MLXSW_PORT_DEFAULT_VID,
1038 					  MLXSW_REG_SPMS_STATE_FORWARDING);
1039 	if (err) {
1040 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
1041 			mlxsw_sx_port->local_port);
1042 		goto err_port_stp_state_set;
1043 	}
1044 
1045 	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
1046 						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
1047 	if (err) {
1048 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
1049 			mlxsw_sx_port->local_port);
1050 		goto err_port_mac_learning_mode_set;
1051 	}
1052 
1053 	err = register_netdev(dev);
1054 	if (err) {
1055 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
1056 			mlxsw_sx_port->local_port);
1057 		goto err_register_netdev;
1058 	}
1059 
1060 	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
1061 				mlxsw_sx_port, dev);
1062 	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1063 	return 0;
1064 
1065 err_register_netdev:
1066 err_port_mac_learning_mode_set:
1067 err_port_stp_state_set:
1068 err_port_admin_status_set:
1069 err_port_mtu_set:
1070 err_port_speed_set:
1071 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1072 err_port_swid_set:
1073 err_port_system_port_mapping_set:
1074 err_dev_addr_get:
1075 	free_percpu(mlxsw_sx_port->pcpu_stats);
1076 err_alloc_stats:
1077 	free_netdev(dev);
1078 	return err;
1079 }
1080 
1081 static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1082 				    u8 module, u8 width)
1083 {
1084 	int err;
1085 
1086 	err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
1087 				   module + 1, false, 0, false, 0,
1088 				   mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
1089 	if (err) {
1090 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
1091 			local_port);
1092 		return err;
1093 	}
1094 	err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
1095 	if (err)
1096 		goto err_port_create;
1097 
1098 	return 0;
1099 
1100 err_port_create:
1101 	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1102 	return err;
1103 }
1104 
1105 static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1106 {
1107 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1108 
1109 	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1110 	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
1111 	mlxsw_sx->ports[local_port] = NULL;
1112 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1113 	free_percpu(mlxsw_sx_port->pcpu_stats);
1114 	free_netdev(mlxsw_sx_port->dev);
1115 }
1116 
1117 static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1118 {
1119 	return mlxsw_sx->ports[local_port] != NULL;
1120 }
1121 
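/* Create an InfiniBand port: no netdev is involved, the port is only moved
 * to the IB SWID and configured with its IB port number, speed, width and
 * maximal MTU before being administratively enabled.
 */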
1122 static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1123 				     u8 module, u8 width)
1124 {
1125 	struct mlxsw_sx_port *mlxsw_sx_port;
1126 	int err;
1127 
1128 	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
1129 	if (!mlxsw_sx_port)
1130 		return -ENOMEM;
1131 	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
1132 	mlxsw_sx_port->local_port = local_port;
1133 	mlxsw_sx_port->mapping.module = module;
1134 
1135 	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1136 	if (err) {
1137 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1138 			mlxsw_sx_port->local_port);
1139 		goto err_port_system_port_mapping_set;
1140 	}
1141 
1142 	/* Add the port to the InfiniBand SWID (1) */
1143 	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
1144 	if (err) {
1145 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1146 			mlxsw_sx_port->local_port);
1147 		goto err_port_swid_set;
1148 	}
1149 
1150 	/* Expose the IB port number as its front panel name */
1151 	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
1152 	if (err) {
1153 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
1154 			mlxsw_sx_port->local_port);
1155 		goto err_port_ib_set;
1156 	}
1157 
1158 	/* Support all speeds from SDR to FDR (bitmask) and a bus width of
1159 	 * 1x, 2x and 4x (3-bit bitmask).
1160 	 */
1161 	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
1162 					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
1163 					 BIT(3) - 1);
1164 	if (err) {
1165 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1166 			mlxsw_sx_port->local_port);
1167 		goto err_port_speed_set;
1168 	}
1169 
1170 	/* Change to the maximum MTU the device supports; the SMA will take
1171 	 * care of the active MTU
1172 	 */
1173 	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
1174 	if (err) {
1175 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1176 			mlxsw_sx_port->local_port);
1177 		goto err_port_mtu_set;
1178 	}
1179 
1180 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
1181 	if (err) {
1182 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
1183 			mlxsw_sx_port->local_port);
1184 		goto err_port_admin_set;
1185 	}
1186 
1187 	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
1188 			       mlxsw_sx_port);
1189 	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1190 	return 0;
1191 
1192 err_port_admin_set:
1193 err_port_mtu_set:
1194 err_port_speed_set:
1195 err_port_ib_set:
1196 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1197 err_port_swid_set:
1198 err_port_system_port_mapping_set:
1199 	kfree(mlxsw_sx_port);
1200 	return err;
1201 }
1202 
1203 static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1204 {
1205 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1206 
1207 	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1208 	mlxsw_sx->ports[local_port] = NULL;
1209 	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1210 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1211 	kfree(mlxsw_sx_port);
1212 }
1213 
1214 static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1215 {
1216 	enum devlink_port_type port_type =
1217 		mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1218 
1219 	if (port_type == DEVLINK_PORT_TYPE_ETH)
1220 		__mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
1221 	else if (port_type == DEVLINK_PORT_TYPE_IB)
1222 		__mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
1223 }
1224 
1225 static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1226 {
1227 	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
1228 	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1229 }
1230 
1231 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1232 {
1233 	int i;
1234 
1235 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
1236 		if (mlxsw_sx_port_created(mlxsw_sx, i))
1237 			mlxsw_sx_port_remove(mlxsw_sx, i);
1238 	kfree(mlxsw_sx->ports);
1239 	mlxsw_sx->ports = NULL;
1240 }
1241 
1242 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1243 {
1244 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
1245 	size_t alloc_size;
1246 	u8 module, width;
1247 	int i;
1248 	int err;
1249 
1250 	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
1251 	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1252 	if (!mlxsw_sx->ports)
1253 		return -ENOMEM;
1254 
1255 	for (i = 1; i < max_ports; i++) {
1256 		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
1257 						    &width);
1258 		if (err)
1259 			goto err_port_module_info_get;
1260 		if (!width)
1261 			continue;
1262 		err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
1263 		if (err)
1264 			goto err_port_create;
1265 	}
1266 	return 0;
1267 
1268 err_port_create:
1269 err_port_module_info_get:
1270 	for (i--; i >= 1; i--)
1271 		if (mlxsw_sx_port_created(mlxsw_sx, i))
1272 			mlxsw_sx_port_remove(mlxsw_sx, i);
1273 	kfree(mlxsw_sx->ports);
1274 	mlxsw_sx->ports = NULL;
1275 	return err;
1276 }
1277 
1278 static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1279 					 enum mlxsw_reg_pude_oper_status status)
1280 {
1281 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
1282 		netdev_info(mlxsw_sx_port->dev, "link up\n");
1283 		netif_carrier_on(mlxsw_sx_port->dev);
1284 	} else {
1285 		netdev_info(mlxsw_sx_port->dev, "link down\n");
1286 		netif_carrier_off(mlxsw_sx_port->dev);
1287 	}
1288 }
1289 
1290 static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1291 					enum mlxsw_reg_pude_oper_status status)
1292 {
1293 	if (status == MLXSW_PORT_OPER_STATUS_UP)
1294 		pr_info("ib link for port %d - up\n",
1295 			mlxsw_sx_port->mapping.module + 1);
1296 	else
1297 		pr_info("ib link for port %d - down\n",
1298 			mlxsw_sx_port->mapping.module + 1);
1299 }
1300 
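/* PUDE (port up/down event) handler: dispatch to the Ethernet or IB variant
 * according to the devlink port type.
 */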
1301 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1302 				     char *pude_pl, void *priv)
1303 {
1304 	struct mlxsw_sx *mlxsw_sx = priv;
1305 	struct mlxsw_sx_port *mlxsw_sx_port;
1306 	enum mlxsw_reg_pude_oper_status status;
1307 	enum devlink_port_type port_type;
1308 	u8 local_port;
1309 
1310 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1311 	mlxsw_sx_port = mlxsw_sx->ports[local_port];
1312 	if (!mlxsw_sx_port) {
1313 		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1314 			 local_port);
1315 		return;
1316 	}
1317 
1318 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
1319 	port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1320 	if (port_type == DEVLINK_PORT_TYPE_ETH)
1321 		mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
1322 	else if (port_type == DEVLINK_PORT_TYPE_IB)
1323 		mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
1324 }
1325 
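/* RX listener: attach the skb to the right netdev, bump the per-CPU RX
 * counters and push it up the stack.
 */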
1326 static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
1327 				      void *priv)
1328 {
1329 	struct mlxsw_sx *mlxsw_sx = priv;
1330 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1331 	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
1332 
1333 	if (unlikely(!mlxsw_sx_port)) {
1334 		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
1335 				     local_port);
1336 		return;
1337 	}
1338 
1339 	skb->dev = mlxsw_sx_port->dev;
1340 
1341 	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
1342 	u64_stats_update_begin(&pcpu_stats->syncp);
1343 	pcpu_stats->rx_packets++;
1344 	pcpu_stats->rx_bytes += skb->len;
1345 	u64_stats_update_end(&pcpu_stats->syncp);
1346 
1347 	skb->protocol = eth_type_trans(skb, skb->dev);
1348 	netif_receive_skb(skb);
1349 }
1350 
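/* devlink port type change: the existing port is torn down and re-created
 * as either an Ethernet or an InfiniBand port.
 */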
1351 static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1352 				  enum devlink_port_type new_type)
1353 {
1354 	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1355 	u8 module, width;
1356 	int err;
1357 
1358 	if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
1359 		dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
1360 			local_port);
1361 		return -EINVAL;
1362 	}
1363 
1364 	if (new_type == DEVLINK_PORT_TYPE_AUTO)
1365 		return -EOPNOTSUPP;
1366 
1367 	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
1368 	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
1369 					    &width);
1370 	if (err)
1371 		goto err_port_module_info_get;
1372 
1373 	if (new_type == DEVLINK_PORT_TYPE_ETH)
1374 		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
1375 						 width);
1376 	else if (new_type == DEVLINK_PORT_TYPE_IB)
1377 		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
1378 						width);
1379 
1380 err_port_module_info_get:
1381 	return err;
1382 }
1383 
1384 enum {
1385 	MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX = 1,
1386 	MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL = 2,
1387 };
1388 
1389 #define MLXSW_SX_RXL(_trap_id) \
1390 	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU,	\
1391 		  false, SX2_RX, FORWARD)
1392 
1393 static const struct mlxsw_listener mlxsw_sx_listener[] = {
1394 	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
1395 	MLXSW_SX_RXL(FDB_MC),
1396 	MLXSW_SX_RXL(STP),
1397 	MLXSW_SX_RXL(LACP),
1398 	MLXSW_SX_RXL(EAPOL),
1399 	MLXSW_SX_RXL(LLDP),
1400 	MLXSW_SX_RXL(MMRP),
1401 	MLXSW_SX_RXL(MVRP),
1402 	MLXSW_SX_RXL(RPVST),
1403 	MLXSW_SX_RXL(DHCP),
1404 	MLXSW_SX_RXL(IGMP_QUERY),
1405 	MLXSW_SX_RXL(IGMP_V1_REPORT),
1406 	MLXSW_SX_RXL(IGMP_V2_REPORT),
1407 	MLXSW_SX_RXL(IGMP_V2_LEAVE),
1408 	MLXSW_SX_RXL(IGMP_V3_REPORT),
1409 };
1410 
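/* Set up the RX and control trap groups and register all listeners defined
 * in mlxsw_sx_listener[].
 */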
1411 static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
1412 {
1413 	char htgt_pl[MLXSW_REG_HTGT_LEN];
1414 	int i;
1415 	int err;
1416 
1417 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
1418 			    MLXSW_REG_HTGT_INVALID_POLICER,
1419 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1420 			    MLXSW_REG_HTGT_DEFAULT_TC);
1421 	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1422 					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
1423 
1424 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1425 	if (err)
1426 		return err;
1427 
1428 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
1429 			    MLXSW_REG_HTGT_INVALID_POLICER,
1430 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1431 			    MLXSW_REG_HTGT_DEFAULT_TC);
1432 	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1433 					MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
1434 
1435 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1436 	if (err)
1437 		return err;
1438 
1439 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1440 		err = mlxsw_core_trap_register(mlxsw_sx->core,
1441 					       &mlxsw_sx_listener[i],
1442 					       mlxsw_sx);
1443 		if (err)
1444 			goto err_listener_register;
1445 
1446 	}
1447 	return 0;
1448 
1449 err_listener_register:
1450 	for (i--; i >= 0; i--) {
1451 		mlxsw_core_trap_unregister(mlxsw_sx->core,
1452 					   &mlxsw_sx_listener[i],
1453 					   mlxsw_sx);
1454 	}
1455 	return err;
1456 }
1457 
1458 static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1459 {
1460 	int i;
1461 
1462 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1463 		mlxsw_core_trap_unregister(mlxsw_sx->core,
1464 					   &mlxsw_sx_listener[i],
1465 					   mlxsw_sx);
1466 	}
1467 }
1468 
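/* Flooding setup: a single flooding table containing only the CPU port, so
 * flooded traffic (unknown unicast, broadcast and unregistered multicast)
 * is delivered to the host CPU.
 */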
1469 static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1470 {
1471 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
1472 	char sgcr_pl[MLXSW_REG_SGCR_LEN];
1473 	char *sftr_pl;
1474 	int err;
1475 
1476 	/* Configure a flooding table, which includes only CPU port. */
1477 	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1478 	if (!sftr_pl)
1479 		return -ENOMEM;
1480 	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1481 			    MLXSW_PORT_CPU_PORT, true);
1482 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1483 	kfree(sftr_pl);
1484 	if (err)
1485 		return err;
1486 
1487 	/* Flood different packet types using the flooding table. */
1488 	mlxsw_reg_sfgc_pack(sfgc_pl,
1489 			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1490 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1491 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1492 			    0);
1493 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1494 	if (err)
1495 		return err;
1496 
1497 	mlxsw_reg_sfgc_pack(sfgc_pl,
1498 			    MLXSW_REG_SFGC_TYPE_BROADCAST,
1499 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1500 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1501 			    0);
1502 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1503 	if (err)
1504 		return err;
1505 
1506 	mlxsw_reg_sfgc_pack(sfgc_pl,
1507 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1508 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1509 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1510 			    0);
1511 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1512 	if (err)
1513 		return err;
1514 
1515 	mlxsw_reg_sfgc_pack(sfgc_pl,
1516 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1517 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1518 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1519 			    0);
1520 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1521 	if (err)
1522 		return err;
1523 
1524 	mlxsw_reg_sfgc_pack(sfgc_pl,
1525 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1526 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1527 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1528 			    0);
1529 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1530 	if (err)
1531 		return err;
1532 
1533 	mlxsw_reg_sgcr_pack(sgcr_pl, true);
1534 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1535 }
1536 
1537 static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
1538 {
1539 	char htgt_pl[MLXSW_REG_HTGT_LEN];
1540 
1541 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
1542 			    MLXSW_REG_HTGT_INVALID_POLICER,
1543 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1544 			    MLXSW_REG_HTGT_DEFAULT_TC);
1545 	mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
1546 	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1547 					MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
1548 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
1549 }
1550 
1551 static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
1552 			 const struct mlxsw_bus_info *mlxsw_bus_info,
1553 			 struct netlink_ext_ack *extack)
1554 {
1555 	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1556 	int err;
1557 
1558 	mlxsw_sx->core = mlxsw_core;
1559 	mlxsw_sx->bus_info = mlxsw_bus_info;
1560 
1561 	err = mlxsw_sx_hw_id_get(mlxsw_sx);
1562 	if (err) {
1563 		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
1564 		return err;
1565 	}
1566 
1567 	err = mlxsw_sx_ports_create(mlxsw_sx);
1568 	if (err) {
1569 		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
1570 		return err;
1571 	}
1572 
1573 	err = mlxsw_sx_traps_init(mlxsw_sx);
1574 	if (err) {
1575 		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
1576 		goto err_listener_register;
1577 	}
1578 
1579 	err = mlxsw_sx_flood_init(mlxsw_sx);
1580 	if (err) {
1581 		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
1582 		goto err_flood_init;
1583 	}
1584 
1585 	return 0;
1586 
1587 err_flood_init:
1588 	mlxsw_sx_traps_fini(mlxsw_sx);
1589 err_listener_register:
1590 	mlxsw_sx_ports_remove(mlxsw_sx);
1591 	return err;
1592 }
1593 
1594 static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
1595 {
1596 	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1597 
1598 	mlxsw_sx_traps_fini(mlxsw_sx);
1599 	mlxsw_sx_ports_remove(mlxsw_sx);
1600 }
1601 
1602 static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
1603 	.used_max_vepa_channels		= 1,
1604 	.max_vepa_channels		= 0,
1605 	.used_max_mid			= 1,
1606 	.max_mid			= 7000,
1607 	.used_max_pgt			= 1,
1608 	.max_pgt			= 0,
1609 	.used_max_system_port		= 1,
1610 	.max_system_port		= 48000,
1611 	.used_max_vlan_groups		= 1,
1612 	.max_vlan_groups		= 127,
1613 	.used_max_regions		= 1,
1614 	.max_regions			= 400,
1615 	.used_flood_tables		= 1,
1616 	.max_flood_tables		= 2,
1617 	.max_vid_flood_tables		= 1,
1618 	.used_flood_mode		= 1,
1619 	.flood_mode			= 3,
1620 	.used_max_ib_mc			= 1,
1621 	.max_ib_mc			= 6,
1622 	.used_max_pkey			= 1,
1623 	.max_pkey			= 0,
1624 	.swid_config			= {
1625 		{
1626 			.used_type	= 1,
1627 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
1628 		},
1629 		{
1630 			.used_type	= 1,
1631 			.type		= MLXSW_PORT_SWID_TYPE_IB,
1632 		}
1633 	},
1634 };
1635 
1636 static struct mlxsw_driver mlxsw_sx_driver = {
1637 	.kind			= mlxsw_sx_driver_name,
1638 	.priv_size		= sizeof(struct mlxsw_sx),
1639 	.init			= mlxsw_sx_init,
1640 	.fini			= mlxsw_sx_fini,
1641 	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
1642 	.txhdr_construct	= mlxsw_sx_txhdr_construct,
1643 	.txhdr_len		= MLXSW_TXHDR_LEN,
1644 	.profile		= &mlxsw_sx_config_profile,
1645 	.port_type_set		= mlxsw_sx_port_type_set,
1646 };
1647 
1648 static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
1649 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
1650 	{0, },
1651 };
1652 
1653 static struct pci_driver mlxsw_sx_pci_driver = {
1654 	.name = mlxsw_sx_driver_name,
1655 	.id_table = mlxsw_sx_pci_id_table,
1656 };
1657 
1658 static int __init mlxsw_sx_module_init(void)
1659 {
1660 	int err;
1661 
1662 	err = mlxsw_core_driver_register(&mlxsw_sx_driver);
1663 	if (err)
1664 		return err;
1665 
1666 	err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
1667 	if (err)
1668 		goto err_pci_driver_register;
1669 
1670 	return 0;
1671 
1672 err_pci_driver_register:
1673 	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1674 	return err;
1675 }
1676 
1677 static void __exit mlxsw_sx_module_exit(void)
1678 {
1679 	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
1680 	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1681 }
1682 
1683 module_init(mlxsw_sx_module_init);
1684 module_exit(mlxsw_sx_module_exit);
1685 
1686 MODULE_LICENSE("Dual BSD/GPL");
1687 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1688 MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
1689 MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);
1690