xref: /linux/drivers/net/ethernet/freescale/enetc/enetc4_pf.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2024 NXP */
3 
4 #include <linux/clk.h>
5 #include <linux/module.h>
6 #include <linux/of_net.h>
7 #include <linux/of_platform.h>
8 #include <linux/unaligned.h>
9 
10 #include "enetc_pf_common.h"
11 #include "enetc4_debugfs.h"
12 
13 #define ENETC_SI_MAX_RING_NUM	8
14 
15 #define ENETC_MAC_FILTER_TYPE_UC	BIT(0)
16 #define ENETC_MAC_FILTER_TYPE_MC	BIT(1)
17 #define ENETC_MAC_FILTER_TYPE_ALL	(ENETC_MAC_FILTER_TYPE_UC | \
18 					 ENETC_MAC_FILTER_TYPE_MC)
19 
/* Holder for one MAC address; used to snapshot the netdev unicast list
 * before programming the MAC address filter table (MAFT).
 */
struct enetc_mac_addr {
	u8 addr[ETH_ALEN];
};
23 
/* Read the port capability registers and cache the number of VSIs,
 * MSI-X vectors, BD rings and MAC filter entries this PF can hand out.
 */
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	u32 val;

	val = enetc_port_rd(hw, ENETC4_ECAPR1);
	pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
	/* Hardware encodes the field as "number of vectors - 1" */
	pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;

	val = enetc_port_rd(hw, ENETC4_ECAPR2);
	pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
	pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;

	val = enetc_port_rd(hw, ENETC4_PMCAPR);
	pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;

	/* Capacity of the MAC address filter table (MAFT) */
	val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
	pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
}
43 
enetc4_pf_set_si_primary_mac(struct enetc_hw * hw,int si,const u8 * addr)44 static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
45 					 const u8 *addr)
46 {
47 	u16 lower = get_unaligned_le16(addr + 4);
48 	u32 upper = get_unaligned_le32(addr);
49 
50 	if (si != 0) {
51 		__raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
52 		__raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
53 	} else {
54 		__raw_writel(upper, hw->port + ENETC4_PMAR0);
55 		__raw_writew(lower, hw->port + ENETC4_PMAR1);
56 	}
57 }
58 
/* Read back the primary MAC address of station interface @si into
 * @addr (6 bytes), reversing the split done when it was programmed.
 */
static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
					 u8 *addr)
{
	u16 lo;
	u32 hi;

	hi = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
	lo = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));

	put_unaligned_le32(hi, addr);
	put_unaligned_le16(lo, addr + 4);
}
71 
/* Enable/disable MAC unicast and multicast promiscuous mode for @si
 * via the per-SI bits in PSIPMMR.
 */
static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
					 bool uc_promisc, bool mc_promisc)
{
	u32 val;

	val = enetc_port_rd(hw, ENETC4_PSIPMMR);

	/* Clear both bits for this SI, then set the requested ones */
	val &= ~(PSIPMMR_SI_MAC_UP(si) | PSIPMMR_SI_MAC_MP(si));
	if (uc_promisc)
		val |= PSIPMMR_SI_MAC_UP(si);
	if (mc_promisc)
		val |= PSIPMMR_SI_MAC_MP(si);

	enetc_port_wr(hw, ENETC4_PSIPMMR, val);
}
89 
/* Program the 64-bit unicast hash filter for @si (0 disables it). */
static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
					    u64 hash)
{
	enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
	enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
}
96 
/* Program the 64-bit multicast hash filter for @si (0 disables it). */
static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
					    u64 hash)
{
	enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
	enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
}
103 
/* Enable/disable MAC loopback on the port for NETIF_F_LOOPBACK. */
static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;
	u32 cfg;

	cfg = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	cfg = u32_replace_bits(cfg, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
	/* Default to select MAC level loopback mode if loopback is enabled. */
	cfg = u32_replace_bits(cfg, en ? LPBCK_MODE_MAC_LEVEL : 0,
			       PM_CMD_CFG_LPBK_MODE);
	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), cfg);
}
118 
enetc4_pf_clear_maft_entries(struct enetc_pf * pf)119 static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
120 {
121 	int i;
122 
123 	for (i = 0; i < pf->num_mfe; i++)
124 		ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
125 
126 	pf->num_mfe = 0;
127 }
128 
enetc4_pf_add_maft_entries(struct enetc_pf * pf,struct enetc_mac_addr * mac,int mac_cnt)129 static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
130 				      struct enetc_mac_addr *mac,
131 				      int mac_cnt)
132 {
133 	struct maft_entry_data maft = {};
134 	u16 si_bit = BIT(0);
135 	int i, err;
136 
137 	maft.cfge.si_bitmap = cpu_to_le16(si_bit);
138 	for (i = 0; i < mac_cnt; i++) {
139 		ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
140 		err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
141 		if (unlikely(err)) {
142 			pf->num_mfe = i;
143 			goto clear_maft_entries;
144 		}
145 	}
146 
147 	pf->num_mfe = mac_cnt;
148 
149 	return 0;
150 
151 clear_maft_entries:
152 	enetc4_pf_clear_maft_entries(pf);
153 
154 	return  err;
155 }
156 
/* Configure exact-match unicast filtering for SI 0 via the MAFT.
 *
 * Returns 0 on success or when filtering was (re)configured, -ENOSPC
 * when the unicast list exceeds the table capacity (caller falls back
 * to hash filtering), -ENOMEM on allocation failure.
 */
static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
{
	int max_num_mfe = pf->caps.mac_filter_num;
	struct enetc_mac_filter mac_filter = {};
	struct net_device *ndev = pf->si->ndev;
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_mac_addr *mac_tbl;
	struct netdev_hw_addr *ha;
	int i = 0, err;
	int mac_cnt;

	netif_addr_lock_bh(ndev);

	mac_cnt = netdev_uc_count(ndev);
	if (!mac_cnt) {
		netif_addr_unlock_bh(ndev);
		/* clear both MAC hash and exact filters */
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
		enetc4_pf_clear_maft_entries(pf);

		return 0;
	}

	if (mac_cnt > max_num_mfe) {
		err = -ENOSPC;
		goto unlock_netif_addr;
	}

	/* GFP_ATOMIC: we are under the addr-list spinlock here */
	mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
	if (!mac_tbl) {
		err = -ENOMEM;
		goto unlock_netif_addr;
	}

	/* Snapshot the UC list and build the hash filter in one pass */
	netdev_for_each_uc_addr(ha, ndev) {
		enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
		ether_addr_copy(mac_tbl[i++].addr, ha->addr);
	}

	netif_addr_unlock_bh(ndev);

	/* Set temporary unicast hash filters in case of Rx loss when
	 * updating MAC address filter table
	 */
	enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
	enetc4_pf_clear_maft_entries(pf);

	/* If MAFT programming fails, the temporary hash filter stays in
	 * place so traffic keeps flowing (coarser filtering, no drop).
	 */
	if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);

	kfree(mac_tbl);

	return 0;

unlock_netif_addr:
	netif_addr_unlock_bh(ndev);

	return err;
}
216 
/* Rebuild the SI 0 unicast and/or multicast hash filters from the
 * netdev address lists. @type is a mask of ENETC_MAC_FILTER_TYPE_*.
 * The whole rebuild runs under the addr-list lock so the lists cannot
 * change while we hash them.
 */
static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
{
	struct net_device *ndev = pf->si->ndev;
	struct enetc_mac_filter *mac_filter;
	struct enetc_hw *hw = &pf->si->hw;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(ndev);
	if (type & ENETC_MAC_FILTER_TYPE_UC) {
		mac_filter = &pf->mac_filter[UC];
		enetc_reset_mac_addr_filter(mac_filter);
		netdev_for_each_uc_addr(ha, ndev)
			enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);

		enetc4_pf_set_si_uc_hash_filter(hw, 0,
						*mac_filter->mac_hash_table);
	}

	if (type & ENETC_MAC_FILTER_TYPE_MC) {
		mac_filter = &pf->mac_filter[MC];
		enetc_reset_mac_addr_filter(mac_filter);
		netdev_for_each_mc_addr(ha, ndev)
			enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);

		enetc4_pf_set_si_mc_hash_filter(hw, 0,
						*mac_filter->mac_hash_table);
	}
	netif_addr_unlock_bh(ndev);
}
246 
enetc4_pf_set_mac_filter(struct enetc_pf * pf,int type)247 static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
248 {
249 	/* Currently, the MAC address filter table (MAFT) only has 4 entries,
250 	 * and multiple multicast addresses for filtering will be configured
251 	 * in the default network configuration, so MAFT is only suitable for
252 	 * unicast filtering. If the number of unicast addresses exceeds the
253 	 * table capacity, the MAC hash filter will be used.
254 	 */
255 	if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
256 		/* Fall back to the MAC hash filter */
257 		enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
258 		/* Clear the old MAC exact filter */
259 		enetc4_pf_clear_maft_entries(pf);
260 	}
261 
262 	if (type & ENETC_MAC_FILTER_TYPE_MC)
263 		enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
264 }
265 
/* PF ops shared with the common PF code (enetc_pf_common). */
static const struct enetc_pf_ops enetc4_pf_ops = {
	.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
	.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
};
270 
enetc4_pf_struct_init(struct enetc_si * si)271 static int enetc4_pf_struct_init(struct enetc_si *si)
272 {
273 	struct enetc_pf *pf = enetc_si_priv(si);
274 
275 	pf->si = si;
276 	pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
277 	pf->ops = &enetc4_pf_ops;
278 
279 	enetc4_get_port_caps(pf);
280 
281 	return 0;
282 }
283 
/* Build a PSICFGR0 value for one SI: ring counts, accepted VLAN types,
 * and for VSIs also VLAN tag extraction/insertion enables.
 */
static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
{
	u32 val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr) |
		  ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr) |
		  ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);

	return is_vf ? val | ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE : val;
}
297 
enetc4_default_rings_allocation(struct enetc_pf * pf)298 static void enetc4_default_rings_allocation(struct enetc_pf *pf)
299 {
300 	struct enetc_hw *hw = &pf->si->hw;
301 	u32 num_rx_bdr, num_tx_bdr, val;
302 	u32 vf_tx_bdr, vf_rx_bdr;
303 	int i, rx_rem, tx_rem;
304 
305 	if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
306 		num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
307 	else
308 		num_rx_bdr = ENETC_SI_MAX_RING_NUM;
309 
310 	if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
311 		num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
312 	else
313 		num_tx_bdr = ENETC_SI_MAX_RING_NUM;
314 
315 	val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
316 	enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
317 
318 	num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
319 	rx_rem = num_rx_bdr % pf->caps.num_vsi;
320 	num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
321 
322 	num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
323 	tx_rem = num_tx_bdr % pf->caps.num_vsi;
324 	num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
325 
326 	for (i = 0; i < pf->caps.num_vsi; i++) {
327 		vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
328 		vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
329 		val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
330 		enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
331 	}
332 }
333 
/* Apply the default BD-ring split; kept as a hook for future policies. */
static void enetc4_allocate_si_rings(struct enetc_pf *pf)
{
	enetc4_default_rings_allocation(pf);
}
338 
/* Toggle VLAN promiscuous mode for @si via its bit in PSIPVMR. */
static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
{
	u32 val;

	val = enetc_port_rd(hw, ENETC4_PSIPVMR);
	val = (val & ~BIT(si)) | (en ? BIT(si) : 0);
	enetc_port_wr(hw, ENETC4_PSIPVMR, val);
}
350 
enetc4_set_default_si_vlan_promisc(struct enetc_pf * pf)351 static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
352 {
353 	struct enetc_hw *hw = &pf->si->hw;
354 	int num_si = pf->caps.num_vsi + 1;
355 	int i;
356 
357 	/* enforce VLAN promiscuous mode for all SIs */
358 	for (i = 0; i < num_si; i++)
359 		enetc4_pf_set_si_vlan_promisc(hw, i, true);
360 }
361 
362 /* Allocate the number of MSI-X vectors for per SI. */
enetc4_set_si_msix_num(struct enetc_pf * pf)363 static void enetc4_set_si_msix_num(struct enetc_pf *pf)
364 {
365 	struct enetc_hw *hw = &pf->si->hw;
366 	int i, num_msix, total_si;
367 	u32 val;
368 
369 	total_si = pf->caps.num_vsi + 1;
370 
371 	num_msix = pf->caps.num_msix / total_si +
372 		   pf->caps.num_msix % total_si - 1;
373 	val = num_msix & PSICFGR2_NUM_MSIX;
374 	enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);
375 
376 	num_msix = pf->caps.num_msix / total_si - 1;
377 	val = num_msix & PSICFGR2_NUM_MSIX;
378 	for (i = 0; i < pf->caps.num_vsi; i++)
379 		enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
380 }
381 
enetc4_enable_all_si(struct enetc_pf * pf)382 static void enetc4_enable_all_si(struct enetc_pf *pf)
383 {
384 	struct enetc_hw *hw = &pf->si->hw;
385 	int num_si = pf->caps.num_vsi + 1;
386 	u32 si_bitmap = 0;
387 	int i;
388 
389 	/* Master enable for all SIs */
390 	for (i = 0; i < num_si; i++)
391 		si_bitmap |= PMR_SI_EN(i);
392 
393 	enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
394 }
395 
/* One-time SI-level port setup: ring split, VLAN filtering source,
 * default promiscuity, MSI-X distribution and SI master enables.
 */
static void enetc4_configure_port_si(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	enetc4_allocate_si_rings(pf);

	/* Outer VLAN tag will be used for VLAN filtering */
	enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);

	enetc4_set_default_si_vlan_promisc(pf);

	/* Disable SI MAC multicast & unicast promiscuous */
	enetc_port_wr(hw, ENETC4_PSIPMMR, 0);

	enetc4_set_si_msix_num(pf);

	enetc4_enable_all_si(pf);
}
414 
enetc4_pf_reset_tc_msdu(struct enetc_hw * hw)415 static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
416 {
417 	u32 val = ENETC_MAC_MAXFRM_SIZE;
418 	int tc;
419 
420 	val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);
421 
422 	for (tc = 0; tc < ENETC_NUM_TC; tc++)
423 		enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
424 }
425 
/* Program the MAC max frame size and propagate it to all TC SDU limits. */
static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
{
	struct enetc_si *si = pf->si;

	enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
			  ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));

	enetc4_pf_reset_tc_msdu(&si->hw);
}
435 
/* Enable port Tx/Rx. NOTE(review): writing 0 to POR presumably clears
 * the Tx/Rx disable bits set at reset — confirm against the reference
 * manual; the register semantics are not visible here.
 */
static void enetc4_enable_trx(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	/* Enable port transmit/receive */
	enetc_port_wr(hw, ENETC4_POR, 0);
}
443 
/* Full port bring-up: SI config, frame sizes, RSS key, then Tx/Rx on. */
static void enetc4_configure_port(struct enetc_pf *pf)
{
	enetc4_configure_port_si(pf);
	enetc4_set_trx_frame_size(pf);
	enetc_set_default_rss_key(pf);
	enetc4_enable_trx(pf);
}
451 
/* Initialize the NTMP (table management protocol) user context and the
 * control BD ring it relies on. Returns 0 or a negative errno.
 */
static int enetc4_init_ntmp_user(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;

	/* For ENETC 4.1, all table versions are 0 */
	memset(&user->tbl, 0, sizeof(user->tbl));

	return enetc4_setup_cbdr(si);
}
461 
/* Tear down the control BD ring backing the NTMP user context. */
static void enetc4_free_ntmp_user(struct enetc_si *si)
{
	enetc4_teardown_cbdr(si);
}
466 
/* PF-side init: MAC addresses, NTMP/CBDR, then port configuration.
 * Returns 0 or a negative errno; on failure nothing needs undoing here
 * (CBDR teardown happens via enetc4_pf_free() only after success).
 */
static int enetc4_pf_init(struct enetc_pf *pf)
{
	struct device *dev = &pf->si->pdev->dev;
	int err;

	/* Initialize the MAC address for PF and VFs */
	err = enetc_setup_mac_addresses(dev->of_node, pf);
	if (err) {
		dev_err(dev, "Failed to set MAC addresses\n");
		return err;
	}

	err = enetc4_init_ntmp_user(pf->si);
	if (err) {
		dev_err(dev, "Failed to init CBDR\n");
		return err;
	}

	enetc4_configure_port(pf);

	return 0;
}
489 
/* Counterpart of enetc4_pf_init(): release the NTMP/CBDR resources. */
static void enetc4_pf_free(struct enetc_pf *pf)
{
	enetc4_free_ntmp_user(pf->si);
}
494 
/* Deferred ndo_set_rx_mode work: translate IFF_PROMISC/IFF_ALLMULTI
 * into SI promiscuity bits and rebuild the MAC filters accordingly.
 * Runs under rtnl_lock since it touches shared filter state.
 */
static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
{
	struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
	struct enetc_pf *pf = enetc_si_priv(si);
	struct net_device *ndev = si->ndev;
	struct enetc_hw *hw = &si->hw;
	bool uc_promisc = false;
	bool mc_promisc = false;
	int type = 0;

	rtnl_lock();

	if (ndev->flags & IFF_PROMISC) {
		/* Fully promiscuous: no filters needed at all */
		uc_promisc = true;
		mc_promisc = true;
	} else if (ndev->flags & IFF_ALLMULTI) {
		/* All multicast accepted; only unicast needs filtering */
		mc_promisc = true;
		type = ENETC_MAC_FILTER_TYPE_UC;
	} else {
		type = ENETC_MAC_FILTER_TYPE_ALL;
	}

	enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);

	/* Drop now-redundant filters for the promiscuous directions */
	if (uc_promisc) {
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
		enetc4_pf_clear_maft_entries(pf);
	}

	if (mc_promisc)
		enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);

	/* Set new MAC filter */
	enetc4_pf_set_mac_filter(pf, type);

	rtnl_unlock();
}
532 
/* ndo_set_rx_mode runs in atomic context; defer the register and
 * NTMP work to the SI's single-threaded workqueue.
 */
static void enetc4_pf_set_rx_mode(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;

	queue_work(si->workqueue, &si->rx_mode_task);
}
540 
/* ndo_set_features: react to VLAN-filter and loopback toggles, then
 * hand the rest to the common feature handler. Always returns 0.
 */
static int enetc4_pf_set_features(struct net_device *ndev,
				  netdev_features_t features)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	netdev_features_t diff = ndev->features ^ features;

	/* VLAN filtering disabled means the SI must accept all VLANs */
	if (diff & NETIF_F_HW_VLAN_CTAG_FILTER)
		enetc4_pf_set_si_vlan_promisc(hw, 0,
					      !(features & NETIF_F_HW_VLAN_CTAG_FILTER));

	if (diff & NETIF_F_LOOPBACK)
		enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));

	enetc_set_features(ndev, features);

	return 0;
}
561 
/* netdev ops: mostly common enetc handlers, plus the enetc4-specific
 * rx-mode (deferred to a workqueue) and feature toggling.
 */
static const struct net_device_ops enetc4_ndev_ops = {
	.ndo_open		= enetc_open,
	.ndo_stop		= enetc_close,
	.ndo_start_xmit		= enetc_xmit,
	.ndo_get_stats		= enetc_get_stats,
	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
	.ndo_set_rx_mode	= enetc4_pf_set_rx_mode,
	.ndo_set_features	= enetc4_pf_set_features,
	.ndo_vlan_rx_add_vid	= enetc_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enetc_vlan_rx_del_vid,
};
573 
/* phylink .mac_select_pcs: hand back the PCS created at MDIO setup. */
static struct phylink_pcs *
enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	return pf->pcs;
}
581 
/* Program the MAC interface mode (PM_IF_MODE) for the given PHY mode.
 * Unsupported modes are logged and leave the register untouched.
 */
static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
			      phy_interface_t phy_mode)
{
	struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		val |= IFMODE_RGMII;
		/* We need to enable auto-negotiation for the MAC
		 * if its RGMII interface support In-Band status.
		 */
		if (phylink_autoneg_inband(mode))
			val |= PM_IF_MODE_ENA;
		break;
	case PHY_INTERFACE_MODE_RMII:
		val |= IFMODE_RMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		val |= IFMODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_XGMII:
	case PHY_INTERFACE_MODE_USXGMII:
		val |= IFMODE_XGMII;
		break;
	default:
		dev_err(priv->dev,
			"Unsupported PHY mode:%d\n", phy_mode);
		return;
	}

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
624 
/* phylink .mac_config: thin adapter onto enetc4_mac_config(). */
static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
				 const struct phylink_link_state *state)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_mac_config(pf, mode, state->interface);
}
632 
/* Update the port speed field in PCR, caching the value to avoid
 * redundant register writes on repeated link-up events.
 */
static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
{
	u32 old_speed = priv->speed;
	u32 val;

	if (speed == old_speed)
		return;

	val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
	val &= ~PCR_PSPEED;

	switch (speed) {
	case SPEED_100:
	case SPEED_1000:
	case SPEED_2500:
	case SPEED_10000:
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
		break;
	case SPEED_10:
	default:
		/* SPEED_10 and any unrecognized speed both fall back to
		 * the 10 Mbps encoding (intentional fallthrough).
		 */
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
}
659 
/* Configure the MAC for RGMII at the negotiated speed/duplex.
 * Skips the write if nothing changed. Speeds other than 10/100/1000
 * leave the SSP field as read (no default case — intentional).
 */
static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);

	switch (speed) {
	case SPEED_1000:
		val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
		break;
	case SPEED_100:
		val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
		break;
	case SPEED_10:
		val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
687 
/* Configure the MAC for RMII at the negotiated speed/duplex; the M10
 * bit selects 10 Mbps. Skips the write if nothing changed.
 */
static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);

	switch (speed) {
	case SPEED_100:
		val &= ~PM_IF_MODE_M10;
		break;
	case SPEED_10:
		val |= PM_IF_MODE_M10;
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
712 
/* Toggle half-duplex (backpressure) flow control; a no-op on MACs
 * without half-duplex support or when the bit is already correct.
 */
static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	if (!pf->caps.half_duplex)
		return;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
	if (val != old_val)
		enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
728 
/* Honor or ignore received PAUSE frames. The hardware bit is
 * PAUSE_IGN, i.e. inverted relative to @rx_pause.
 */
static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
	if (val != old_val)
		enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
741 
/* Configure PAUSE frame transmission: per-ring congestion marking,
 * PAUSE quanta/refresh, and the FIFO on/off thresholds. With
 * @tx_pause false all values are zeroed, disabling PAUSE emission.
 */
static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
{
	u32 pause_off_thresh = 0, pause_on_thresh = 0;
	u32 init_quanta = 0, refresh_quanta = 0;
	struct enetc_hw *hw = &pf->si->hw;
	u32 rbmr, old_rbmr;
	int i;

	/* Enable congestion marking on each Rx BD ring only when it
	 * actually changes, to avoid redundant register writes.
	 */
	for (i = 0; i < num_rxbdr; i++) {
		old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
		rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
		if (rbmr == old_rbmr)
			continue;

		enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
	}

	if (tx_pause) {
		/* When the port first enters congestion, send a PAUSE request
		 * with the maximum number of quanta. When the port exits
		 * congestion, it will automatically send a PAUSE frame with
		 * zero quanta.
		 */
		init_quanta = 0xffff;

		/* Also, set up the refresh timer to send follow-up PAUSE
		 * frames at half the quanta value, in case the congestion
		 * condition persists.
		 */
		refresh_quanta = 0xffff / 2;

		/* Start emitting PAUSE frames when 3 large frames (or more
		 * smaller frames) have accumulated in the FIFO waiting to be
		 * DMAed to the RX ring.
		 */
		pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
		pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
	}

	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
	enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
	enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
}
786 
/* Enable or disable both the Tx and Rx paths of the port MAC. */
static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
{
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	if (en)
		val |= PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN;
	else
		val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
798 
/* phylink .mac_link_up: apply the resolved speed/duplex/pause settings
 * and finally enable the MAC.
 */
static void enetc4_pl_mac_link_up(struct phylink_config *config,
				  struct phy_device *phy, unsigned int mode,
				  phy_interface_t interface, int speed,
				  int duplex, bool tx_pause, bool rx_pause)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);
	struct enetc_si *si = pf->si;
	struct enetc_ndev_priv *priv;
	bool hd_fc = false;

	priv = netdev_priv(si->ndev);
	enetc4_set_port_speed(priv, speed);

	/* In in-band mode the PCS/PHY drives these settings itself */
	if (!phylink_autoneg_inband(mode) &&
	    phy_interface_mode_is_rgmii(interface))
		enetc4_set_rgmii_mac(pf, speed, duplex);

	if (interface == PHY_INTERFACE_MODE_RMII)
		enetc4_set_rmii_mac(pf, speed, duplex);

	if (duplex == DUPLEX_FULL) {
		/* When preemption is enabled, generation of PAUSE frames
		 * must be disabled, as stated in the IEEE 802.3 standard.
		 */
		if (priv->active_offloads & ENETC_F_QBU)
			tx_pause = false;
	} else { /* DUPLEX_HALF */
		if (tx_pause || rx_pause)
			hd_fc = true;

		/* As per 802.3 annex 31B, PAUSE frames are only supported
		 * when the link is configured for full duplex operation.
		 */
		tx_pause = false;
		rx_pause = false;
	}

	enetc4_set_hd_flow_control(pf, hd_fc);
	enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
	enetc4_set_rx_pause(pf, rx_pause);
	enetc4_enable_mac(pf, true);
}
841 
/* phylink .mac_link_down: stop the MAC Tx/Rx paths. */
static void enetc4_pl_mac_link_down(struct phylink_config *config,
				    unsigned int mode,
				    phy_interface_t interface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_enable_mac(pf, false);
}
850 
/* phylink MAC operations for the ENETC4 PF. */
static const struct phylink_mac_ops enetc_pl_mac_ops = {
	.mac_select_pcs = enetc4_pl_mac_select_pcs,
	.mac_config = enetc4_pl_mac_config,
	.mac_link_up = enetc4_pl_mac_link_up,
	.mac_link_down = enetc4_pl_mac_link_down,
};
857 
/* devm action wrapper so enetc_pci_remove() runs automatically on
 * probe failure or device removal (registered in enetc4_pf_probe()).
 */
static void enetc4_pci_remove(void *data)
{
	struct pci_dev *pdev = data;

	enetc_pci_remove(pdev);
}
864 
/* Resolve the PHY mode from DT and set up the MDIO bus and phylink
 * instance. On phylink failure the MDIO bus is torn down again.
 * Returns 0 or a negative errno.
 */
static int enetc4_link_init(struct enetc_ndev_priv *priv,
			    struct device_node *node)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	struct device *dev = priv->dev;
	int err;

	err = of_get_phy_mode(node, &pf->if_mode);
	if (err) {
		dev_err(dev, "Failed to get PHY mode\n");
		return err;
	}

	err = enetc_mdiobus_create(pf, node);
	if (err) {
		dev_err(dev, "Failed to create MDIO bus\n");
		return err;
	}

	err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
	if (err) {
		dev_err(dev, "Failed to create phylink\n");
		goto err_phylink_create;
	}

	return 0;

err_phylink_create:
	enetc_mdiobus_destroy(pf);

	return err;
}
897 
/* Undo enetc4_link_init(): destroy phylink first, then the MDIO bus. */
static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);

	enetc_phylink_destroy(priv);
	enetc_mdiobus_destroy(pf);
}
905 
/* Create the per-SI single-threaded workqueue used to defer rx-mode
 * updates out of atomic context. Returns 0 or -ENOMEM.
 */
static int enetc4_psi_wq_task_init(struct enetc_si *si)
{
	char wq_name[24];

	INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
	snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
	si->workqueue = create_singlethread_workqueue(wq_name);
	if (!si->workqueue)
		return -ENOMEM;

	return 0;
}
918 
/* Allocate and register the PF netdev: clock lookup, SI configuration,
 * MSI-X, link (MDIO + phylink), rx-mode workqueue, then registration.
 * The error labels unwind in exact reverse order of setup.
 */
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
	struct device *dev = &si->pdev->dev;
	struct enetc_ndev_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
				  si->num_tx_rings, si->num_rx_rings);
	if (!ndev)
		return  -ENOMEM;

	priv = netdev_priv(ndev);
	/* Optional "ref" clock; devm-managed, so no explicit put needed */
	priv->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(priv->ref_clk)) {
		dev_err(dev, "Get reference clock failed\n");
		err = PTR_ERR(priv->ref_clk);
		goto err_clk_get;
	}

	enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);

	enetc_init_si_rings_params(priv);

	err = enetc_configure_si(priv);
	if (err) {
		dev_err(dev, "Failed to configure SI\n");
		goto err_config_si;
	}

	err = enetc_alloc_msix(priv);
	if (err) {
		dev_err(dev, "Failed to alloc MSI-X\n");
		goto err_alloc_msix;
	}

	err = enetc4_link_init(priv, dev->of_node);
	if (err)
		goto err_link_init;

	err = enetc4_psi_wq_task_init(si);
	if (err) {
		dev_err(dev, "Failed to init workqueue\n");
		goto err_wq_init;
	}

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
	destroy_workqueue(si->workqueue);
err_wq_init:
	enetc4_link_deinit(priv);
err_link_init:
	enetc_free_msix(priv);
err_alloc_msix:
err_config_si:
err_clk_get:
	free_netdev(ndev);

	return err;
}
986 
/* Reverse of enetc4_pf_netdev_create(). cancel_work() may leave a
 * queued work item, but destroy_workqueue() drains it before freeing.
 */
static void enetc4_pf_netdev_destroy(struct enetc_si *si)
{
	struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
	struct net_device *ndev = si->ndev;

	unregister_netdev(ndev);
	cancel_work(&si->rx_mode_task);
	destroy_workqueue(si->workqueue);
	enetc4_link_deinit(priv);
	enetc_free_msix(priv);
	free_netdev(ndev);
}
999 
/* SI ops used by the common code for RSS table access on ENETC4. */
static const struct enetc_si_ops enetc4_psi_ops = {
	.get_rss_table = enetc4_get_rss_table,
	.set_rss_table = enetc4_set_rss_table,
};
1004 
enetc4_pf_probe(struct pci_dev * pdev,const struct pci_device_id * ent)1005 static int enetc4_pf_probe(struct pci_dev *pdev,
1006 			   const struct pci_device_id *ent)
1007 {
1008 	struct device *dev = &pdev->dev;
1009 	struct enetc_si *si;
1010 	struct enetc_pf *pf;
1011 	int err;
1012 
1013 	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
1014 	if (err)
1015 		return dev_err_probe(dev, err, "PCIe probing failed\n");
1016 
1017 	err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
1018 	if (err)
1019 		return dev_err_probe(dev, err,
1020 				     "Add enetc4_pci_remove() action failed\n");
1021 
1022 	/* si is the private data. */
1023 	si = pci_get_drvdata(pdev);
1024 	if (!si->hw.port || !si->hw.global)
1025 		return dev_err_probe(dev, -ENODEV,
1026 				     "Couldn't map PF only space\n");
1027 
1028 	si->revision = enetc_get_ip_revision(&si->hw);
1029 	si->ops = &enetc4_psi_ops;
1030 	err = enetc_get_driver_data(si);
1031 	if (err)
1032 		return dev_err_probe(dev, err,
1033 				     "Could not get VF driver data\n");
1034 
1035 	err = enetc4_pf_struct_init(si);
1036 	if (err)
1037 		return err;
1038 
1039 	pf = enetc_si_priv(si);
1040 	err = enetc4_pf_init(pf);
1041 	if (err)
1042 		return err;
1043 
1044 	enetc_get_si_caps(si);
1045 
1046 	err = enetc4_pf_netdev_create(si);
1047 	if (err)
1048 		goto err_netdev_create;
1049 
1050 	enetc_create_debugfs(si);
1051 
1052 	return 0;
1053 
1054 err_netdev_create:
1055 	enetc4_pf_free(pf);
1056 
1057 	return err;
1058 }
1059 
/* PCI remove: tear down in reverse probe order; the devm action then
 * runs enetc_pci_remove() for the PCI-level cleanup.
 */
static void enetc4_pf_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_pf *pf = enetc_si_priv(si);

	enetc_remove_debugfs(si);
	enetc4_pf_netdev_destroy(si);
	enetc4_pf_free(pf);
}
1069 
/* PCI IDs bound by this driver (NXP ENETC4 PF function). */
static const struct pci_device_id enetc4_pf_id_table[] = {
	{ PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
	{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
1075 
/* PCI driver glue and module metadata. */
static struct pci_driver enetc4_pf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = enetc4_pf_id_table,
	.probe = enetc4_pf_probe,
	.remove = enetc4_pf_remove,
};
module_pci_driver(enetc4_pf_driver);

MODULE_DESCRIPTION("ENETC4 PF Driver");
MODULE_LICENSE("Dual BSD/GPL");
1086