1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2024 NXP */
3
4 #include <linux/clk.h>
5 #include <linux/module.h>
6 #include <linux/of_net.h>
7 #include <linux/of_platform.h>
8 #include <linux/unaligned.h>
9
10 #include "enetc_pf_common.h"
11 #include "enetc4_debugfs.h"
12
13 #define ENETC_SI_MAX_RING_NUM 8
14
15 #define ENETC_MAC_FILTER_TYPE_UC BIT(0)
16 #define ENETC_MAC_FILTER_TYPE_MC BIT(1)
17 #define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
18 ENETC_MAC_FILTER_TYPE_MC)
19
20 struct enetc_mac_addr {
21 u8 addr[ETH_ALEN];
22 };
23
/* Read the port capability registers and cache the resource counts
 * (VSIs, MSI-X vectors, BD rings, MAC filter entries) in pf->caps.
 * The shift amounts correspond to the field positions within the
 * ECAPR1/ECAPR2/PMCAPR/PSIMAFCAPR registers.
 */
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	u32 val;

	val = enetc_port_rd(hw, ENETC4_ECAPR1);
	pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
	/* +1: the NUM_MSIX field appears to hold one less than the
	 * actual vector count - TODO confirm against the RM.
	 */
	pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;

	val = enetc_port_rd(hw, ENETC4_ECAPR2);
	pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
	pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;

	/* Half-duplex support is a MAC capability bit */
	val = enetc_port_rd(hw, ENETC4_PMCAPR);
	pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;

	/* Number of MAC address filter table entries available to the port */
	val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
	pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
}
43
enetc4_get_psi_hw_features(struct enetc_si * si)44 static void enetc4_get_psi_hw_features(struct enetc_si *si)
45 {
46 struct enetc_hw *hw = &si->hw;
47 u32 val;
48
49 val = enetc_port_rd(hw, ENETC4_PCAPR);
50 if (val & PCAPR_LINK_TYPE)
51 si->hw_features |= ENETC_SI_F_PPM;
52 }
53
enetc4_pf_set_si_primary_mac(struct enetc_hw * hw,int si,const u8 * addr)54 static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
55 const u8 *addr)
56 {
57 u16 lower = get_unaligned_le16(addr + 4);
58 u32 upper = get_unaligned_le32(addr);
59
60 if (si != 0) {
61 __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
62 __raw_writel(lower, hw->port + ENETC4_PSIPMAR1(si));
63 } else {
64 __raw_writel(upper, hw->port + ENETC4_PMAR0);
65 __raw_writel(lower, hw->port + ENETC4_PMAR1);
66 }
67 }
68
/* Read back the primary MAC address of station interface @si from the
 * PSIPMAR register pair into @addr (registers hold it little-endian).
 */
static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
					 u8 *addr)
{
	u32 upper;
	u16 lower;

	upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
	lower = __raw_readl(hw->port + ENETC4_PSIPMAR1(si));

	put_unaligned_le32(upper, addr);
	put_unaligned_le16(lower, addr + 4);
}
81
/* Set the unicast and multicast MAC promiscuous bits of station
 * interface @si in the PSIPMMR register (read-modify-write).
 */
static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
					 bool uc_promisc, bool mc_promisc)
{
	u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);

	/* Clear both bits for this SI, then set the requested ones */
	val &= ~(PSIPMMR_SI_MAC_UP(si) | PSIPMMR_SI_MAC_MP(si));
	if (uc_promisc)
		val |= PSIPMMR_SI_MAC_UP(si);
	if (mc_promisc)
		val |= PSIPMMR_SI_MAC_MP(si);

	enetc_port_wr(hw, ENETC4_PSIPMMR, val);
}
99
/* Program the 64-bit unicast MAC hash filter of station interface @si
 * (split across two 32-bit registers). A hash of 0 disables filtering.
 */
static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
					    u64 hash)
{
	enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
	enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
}
106
/* Program the 64-bit multicast MAC hash filter of station interface
 * @si (split across two 32-bit registers).
 */
static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
					    u64 hash)
{
	enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
	enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
}
113
/* Enable/disable loopback on the port MAC via PM_CMD_CFG. */
static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
	/* Default to select MAC level loopback mode if loopback is enabled. */
	val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
			       PM_CMD_CFG_LPBK_MODE);

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
128
enetc4_pf_clear_maft_entries(struct enetc_pf * pf)129 static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
130 {
131 int i;
132
133 for (i = 0; i < pf->num_mfe; i++)
134 ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
135
136 pf->num_mfe = 0;
137 }
138
/* Install @mac_cnt exact-match entries into the MAC address filter
 * table (MAFT), all mapped to SI 0 (the PF). On failure, the entries
 * added so far are rolled back and the error is returned.
 */
static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
				      struct enetc_mac_addr *mac,
				      int mac_cnt)
{
	struct maft_entry_data maft = {};
	u16 si_bit = BIT(0);
	int i, err;

	/* Every entry targets only the PF's own station interface */
	maft.cfge.si_bitmap = cpu_to_le16(si_bit);
	for (i = 0; i < mac_cnt; i++) {
		ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
		err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
		if (unlikely(err)) {
			/* Record how many entries exist so the rollback
			 * deletes exactly those.
			 */
			pf->num_mfe = i;
			goto clear_maft_entries;
		}
	}

	pf->num_mfe = mac_cnt;

	return 0;

clear_maft_entries:
	enetc4_pf_clear_maft_entries(pf);

	return err;
}
166
/* Mirror the netdev's unicast address list into MAFT exact-match
 * entries for SI 0.
 *
 * Returns 0 on success (including the "no addresses" case, which
 * clears both the hash and exact filters), -ENOSPC when the list does
 * not fit in the MAFT, or -ENOMEM on allocation failure. On error the
 * caller is expected to fall back to hash filtering.
 *
 * The address list is snapshotted under the netdev addr_list lock;
 * hardware is only touched after the lock is dropped.
 */
static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
{
	int max_num_mfe = pf->caps.mac_filter_num;
	struct enetc_mac_filter mac_filter = {};
	struct net_device *ndev = pf->si->ndev;
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_mac_addr *mac_tbl;
	struct netdev_hw_addr *ha;
	int i = 0, err;
	int mac_cnt;

	netif_addr_lock_bh(ndev);

	mac_cnt = netdev_uc_count(ndev);
	if (!mac_cnt) {
		netif_addr_unlock_bh(ndev);
		/* clear both MAC hash and exact filters */
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
		enetc4_pf_clear_maft_entries(pf);

		return 0;
	}

	if (mac_cnt > max_num_mfe) {
		err = -ENOSPC;
		goto unlock_netif_addr;
	}

	/* GFP_ATOMIC: allocating under the BH-disabled addr_list lock */
	mac_tbl = kzalloc_objs(*mac_tbl, mac_cnt, GFP_ATOMIC);
	if (!mac_tbl) {
		err = -ENOMEM;
		goto unlock_netif_addr;
	}

	/* Snapshot the addresses and accumulate their hash in parallel */
	netdev_for_each_uc_addr(ha, ndev) {
		enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
		ether_addr_copy(mac_tbl[i++].addr, ha->addr);
	}

	netif_addr_unlock_bh(ndev);

	/* Set temporary unicast hash filters in case of Rx loss when
	 * updating MAC address filter table
	 */
	enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
	enetc4_pf_clear_maft_entries(pf);

	/* Once the exact entries are in place, the temporary hash
	 * filter is no longer needed.
	 */
	if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);

	kfree(mac_tbl);

	return 0;

unlock_netif_addr:
	netif_addr_unlock_bh(ndev);

	return err;
}
226
/* Rebuild the SI 0 unicast and/or multicast hash filters (selected by
 * @type, an ENETC_MAC_FILTER_TYPE_* mask) from the netdev's current
 * address lists, under the netdev addr_list lock.
 */
static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
{
	struct net_device *ndev = pf->si->ndev;
	struct enetc_mac_filter *mac_filter;
	struct enetc_hw *hw = &pf->si->hw;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(ndev);
	if (type & ENETC_MAC_FILTER_TYPE_UC) {
		mac_filter = &pf->mac_filter[UC];
		enetc_reset_mac_addr_filter(mac_filter);
		netdev_for_each_uc_addr(ha, ndev)
			enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);

		enetc4_pf_set_si_uc_hash_filter(hw, 0,
						*mac_filter->mac_hash_table);
	}

	if (type & ENETC_MAC_FILTER_TYPE_MC) {
		mac_filter = &pf->mac_filter[MC];
		enetc_reset_mac_addr_filter(mac_filter);
		netdev_for_each_mc_addr(ha, ndev)
			enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);

		enetc4_pf_set_si_mc_hash_filter(hw, 0,
						*mac_filter->mac_hash_table);
	}
	netif_addr_unlock_bh(ndev);
}
256
/* Reprogram the SI 0 MAC filters for the filter types in @type:
 * unicast prefers MAFT exact matching and falls back to hash
 * filtering; multicast always uses hash filtering.
 */
static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
{
	/* Currently, the MAC address filter table (MAFT) only has 4 entries,
	 * and multiple multicast addresses for filtering will be configured
	 * in the default network configuration, so MAFT is only suitable for
	 * unicast filtering. If the number of unicast addresses exceeds the
	 * table capacity, the MAC hash filter will be used.
	 */
	if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
		/* Fall back to the MAC hash filter */
		enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
		/* Clear the old MAC exact filter */
		enetc4_pf_clear_maft_entries(pf);
	}

	if (type & ENETC_MAC_FILTER_TYPE_MC)
		enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
}
275
/* PF operations consumed by the common PF code (enetc_pf_common) */
static const struct enetc_pf_ops enetc4_pf_ops = {
	.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
	.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
};
280
enetc4_pf_struct_init(struct enetc_si * si)281 static int enetc4_pf_struct_init(struct enetc_si *si)
282 {
283 struct enetc_pf *pf = enetc_si_priv(si);
284
285 pf->si = si;
286 pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
287 pf->ops = &enetc4_pf_ops;
288
289 enetc4_get_port_caps(pf);
290 enetc4_get_psi_hw_features(si);
291
292 return 0;
293 }
294
/* Build a PSICFGR0 value from the TX/RX BD ring counts and the
 * accepted VLAN ethertypes; VF SIs additionally get the VTE and
 * SIVIE enable bits set.
 */
static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
{
	u32 cfg = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr) |
		  ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr) |
		  ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);

	return is_vf ? cfg | ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE : cfg;
}
308
enetc4_default_rings_allocation(struct enetc_pf * pf)309 static void enetc4_default_rings_allocation(struct enetc_pf *pf)
310 {
311 struct enetc_hw *hw = &pf->si->hw;
312 u32 num_rx_bdr, num_tx_bdr, val;
313 u32 vf_tx_bdr, vf_rx_bdr;
314 int i, rx_rem, tx_rem;
315
316 if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
317 num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
318 else
319 num_rx_bdr = ENETC_SI_MAX_RING_NUM;
320
321 if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
322 num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
323 else
324 num_tx_bdr = ENETC_SI_MAX_RING_NUM;
325
326 val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
327 enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
328
329 num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
330 rx_rem = num_rx_bdr % pf->caps.num_vsi;
331 num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
332
333 num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
334 tx_rem = num_tx_bdr % pf->caps.num_vsi;
335 num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
336
337 for (i = 0; i < pf->caps.num_vsi; i++) {
338 vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
339 vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
340 val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
341 enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
342 }
343 }
344
/* Currently only the default (static) ring distribution is implemented */
static void enetc4_allocate_si_rings(struct enetc_pf *pf)
{
	enetc4_default_rings_allocation(pf);
}
349
/* Toggle VLAN promiscuous mode for station interface @si in PSIPVMR
 * (one bit per SI, read-modify-write).
 */
static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
{
	u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR);

	val &= ~BIT(si);
	if (en)
		val |= BIT(si);

	enetc_port_wr(hw, ENETC4_PSIPVMR, val);
}
361
enetc4_set_default_si_vlan_promisc(struct enetc_pf * pf)362 static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
363 {
364 struct enetc_hw *hw = &pf->si->hw;
365 int num_si = pf->caps.num_vsi + 1;
366 int i;
367
368 /* enforce VLAN promiscuous mode for all SIs */
369 for (i = 0; i < num_si; i++)
370 enetc4_pf_set_si_vlan_promisc(hw, i, true);
371 }
372
/* Allocate the number of MSI-X vectors for each SI. */
static void enetc4_set_si_msix_num(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	int i, num_msix, total_si;
	u32 val;

	/* The PF plus all VSIs */
	total_si = pf->caps.num_vsi + 1;

	/* SI 0 gets an even share plus whatever is left over after the
	 * split. The -1 suggests the register field holds one less than
	 * the vector count - TODO confirm against the reference manual.
	 */
	num_msix = pf->caps.num_msix / total_si +
		   pf->caps.num_msix % total_si - 1;
	val = num_msix & PSICFGR2_NUM_MSIX;
	enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);

	/* Each VSI gets the plain even share */
	num_msix = pf->caps.num_msix / total_si - 1;
	val = num_msix & PSICFGR2_NUM_MSIX;
	for (i = 0; i < pf->caps.num_vsi; i++)
		enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
}
392
enetc4_enable_all_si(struct enetc_pf * pf)393 static void enetc4_enable_all_si(struct enetc_pf *pf)
394 {
395 struct enetc_hw *hw = &pf->si->hw;
396 int num_si = pf->caps.num_vsi + 1;
397 u32 si_bitmap = 0;
398 int i;
399
400 /* Master enable for all SIs */
401 for (i = 0; i < num_si; i++)
402 si_bitmap |= PMR_SI_EN(i);
403
404 enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
405 }
406
/* Configure the station interfaces of the port: ring allocation,
 * VLAN/MAC filtering defaults, MSI-X distribution, and finally the
 * master enable for all SIs.
 */
static void enetc4_configure_port_si(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	enetc4_allocate_si_rings(pf);

	/* Outer VLAN tag will be used for VLAN filtering */
	enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);

	enetc4_set_default_si_vlan_promisc(pf);

	/* Disable SI MAC multicast & unicast promiscuous */
	enetc_port_wr(hw, ENETC4_PSIPMMR, 0);

	enetc4_set_si_msix_num(pf);

	enetc4_enable_all_si(pf);
}
425
/* Program every traffic class's max SDU register to the MAC maximum
 * frame size, with the SDU type field set to MPDU.
 */
static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
{
	u32 val = ENETC_MAC_MAXFRM_SIZE;
	int tc;

	val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);

	for (tc = 0; tc < ENETC_NUM_TC; tc++)
		enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
}
436
/* Set the MAC maximum frame size and reset the per-TC max SDU values
 * to match.
 */
static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
{
	struct enetc_si *si = pf->si;

	enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
			  ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));

	enetc4_pf_reset_tc_msdu(&si->hw);
}
446
/* One-time port setup: SI partitioning/enable, TX/RX frame sizes, and
 * the default RSS key.
 */
static void enetc4_configure_port(struct enetc_pf *pf)
{
	enetc4_configure_port_si(pf);
	enetc4_set_trx_frame_size(pf);
	enetc_set_default_rss_key(pf);
}
453
/* Initialize the NTMP (NETC table management protocol) user context
 * and set up the control BD ring used to issue table commands.
 */
static int enetc4_init_ntmp_user(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;

	/* For ENETC 4.1, all table versions are 0 */
	memset(&user->tbl, 0, sizeof(user->tbl));

	return enetc4_setup_cbdr(si);
}
463
/* Tear down the control BD ring created by enetc4_init_ntmp_user() */
static void enetc4_free_ntmp_user(struct enetc_si *si)
{
	enetc4_teardown_cbdr(si);
}
468
/* PF-level initialization: MAC addresses, NTMP/CBDR setup, then the
 * port hardware configuration. Returns 0 or a negative errno.
 */
static int enetc4_pf_init(struct enetc_pf *pf)
{
	struct device *dev = &pf->si->pdev->dev;
	int err;

	/* Initialize the MAC address for PF and VFs */
	err = enetc_setup_mac_addresses(dev->of_node, pf);
	if (err) {
		dev_err(dev, "Failed to set MAC addresses\n");
		return err;
	}

	err = enetc4_init_ntmp_user(pf->si);
	if (err) {
		dev_err(dev, "Failed to init CBDR\n");
		return err;
	}

	enetc4_configure_port(pf);

	return 0;
}
491
/* Counterpart of enetc4_pf_init(): release the NTMP/CBDR resources */
static void enetc4_pf_free(struct enetc_pf *pf)
{
	enetc4_free_ntmp_user(pf->si);
}
496
/* Deferred rx_mode handler, runs from the SI workqueue.
 *
 * Derives the promiscuous state from the netdev flags, reprograms the
 * SI 0 promiscuous bits, and clears the filters of whichever address
 * classes went promiscuous before rebuilding the remaining filters.
 */
static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
{
	struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
	struct enetc_pf *pf = enetc_si_priv(si);
	struct net_device *ndev = si->ndev;
	struct enetc_hw *hw = &si->hw;
	bool uc_promisc = false;
	bool mc_promisc = false;
	int type = 0;

	/* NOTE(review): taken presumably to serialize against ndo
	 * callbacks changing the netdev flags - confirm.
	 */
	rtnl_lock();

	/* type holds the filter classes that still need programming */
	if (ndev->flags & IFF_PROMISC) {
		uc_promisc = true;
		mc_promisc = true;
	} else if (ndev->flags & IFF_ALLMULTI) {
		mc_promisc = true;
		type = ENETC_MAC_FILTER_TYPE_UC;
	} else {
		type = ENETC_MAC_FILTER_TYPE_ALL;
	}

	enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);

	/* Promiscuous reception makes the corresponding filters redundant */
	if (uc_promisc) {
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
		enetc4_pf_clear_maft_entries(pf);
	}

	if (mc_promisc)
		enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);

	/* Set new MAC filter */
	enetc4_pf_set_mac_filter(pf, type);

	rtnl_unlock();
}
534
/* ndo_set_rx_mode: runs in atomic context, so defer the actual filter
 * reprogramming (which sleeps) to the SI workqueue.
 */
static void enetc4_pf_set_rx_mode(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;

	queue_work(si->workqueue, &si->rx_mode_task);
}
542
/* ndo_set_features: apply VLAN-filter and loopback toggles that have
 * actually changed, then hand the rest to the common helper.
 */
static int enetc4_pf_set_features(struct net_device *ndev,
				  netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		/* VLAN filtering off means accept all VLANs, i.e. VLAN
		 * promiscuous mode on.
		 */
		bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);

		enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
	}

	if (changed & NETIF_F_LOOPBACK)
		enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));

	enetc_set_features(ndev, features);

	return 0;
}
563
/* netdev operations for the ENETC v4 PF; mostly shared enetc helpers,
 * with v4-specific rx_mode and feature handling.
 */
static const struct net_device_ops enetc4_ndev_ops = {
	.ndo_open		= enetc_open,
	.ndo_stop		= enetc_close,
	.ndo_start_xmit		= enetc_xmit,
	.ndo_get_stats		= enetc_get_stats,
	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
	.ndo_set_rx_mode	= enetc4_pf_set_rx_mode,
	.ndo_set_features	= enetc4_pf_set_features,
	.ndo_vlan_rx_add_vid	= enetc_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enetc_vlan_rx_del_vid,
	.ndo_eth_ioctl		= enetc_ioctl,
	.ndo_hwtstamp_get	= enetc_hwtstamp_get,
	.ndo_hwtstamp_set	= enetc_hwtstamp_set,
};
578
579 static struct phylink_pcs *
enetc4_pl_mac_select_pcs(struct phylink_config * config,phy_interface_t iface)580 enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
581 {
582 struct enetc_pf *pf = phylink_to_enetc_pf(config);
583
584 return pf->pcs;
585 }
586
/* Program the MAC interface mode (RGMII/RMII/SGMII/XGMII family) into
 * PM_IF_MODE. Skipped for pseudo MACs, which have no external
 * interface to configure.
 */
static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
			      phy_interface_t phy_mode)
{
	struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
	struct enetc_si *si = pf->si;
	u32 val;

	if (enetc_is_pseudo_mac(si))
		return;

	val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		val |= IFMODE_RGMII;
		/* We need to enable auto-negotiation for the MAC
		 * if its RGMII interface supports in-band status.
		 */
		if (phylink_autoneg_inband(mode))
			val |= PM_IF_MODE_ENA;
		break;
	case PHY_INTERFACE_MODE_RMII:
		val |= IFMODE_RMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		val |= IFMODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_XGMII:
	case PHY_INTERFACE_MODE_USXGMII:
		val |= IFMODE_XGMII;
		break;
	default:
		/* Leave the register untouched for unsupported modes */
		dev_err(priv->dev,
			"Unsupported PHY mode:%d\n", phy_mode);
		return;
	}

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
632
/* phylink mac_config callback: thin wrapper around enetc4_mac_config() */
static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
				 const struct phylink_link_state *state)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_mac_config(pf, mode, state->interface);
}
640
/* Update the port speed field in PCR when the speed changed; any
 * unrecognized speed falls back to the 10 Mbps encoding.
 */
static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
{
	u32 old_speed = priv->speed;
	u32 val;

	if (speed == old_speed)
		return;

	val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
	val &= ~PCR_PSPEED;

	switch (speed) {
	case SPEED_100:
	case SPEED_1000:
	case SPEED_2500:
	case SPEED_10000:
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
		break;
	case SPEED_10:
	default:
		/* SPEED_10 and anything unknown both use the 10 Mbps value */
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
}
667
/* Program the RGMII speed/duplex fields of PM_IF_MODE; only writes the
 * register when the value actually changes.
 */
static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	/* Clear in-band ENA and the RMII-only mode bits */
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);

	switch (speed) {
	case SPEED_1000:
		val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
		break;
	case SPEED_100:
		val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
		break;
	case SPEED_10:
		val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
695
/* Program the RMII speed (via the M10 bit) and duplex fields of
 * PM_IF_MODE; only writes the register when the value changes.
 */
static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	/* Clear in-band ENA and the RGMII-only SSP field */
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);

	switch (speed) {
	case SPEED_100:
		val &= ~PM_IF_MODE_M10;
		break;
	case SPEED_10:
		val |= PM_IF_MODE_M10;
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
720
/* Toggle half-duplex (backpressure) flow control; a no-op when the
 * MAC does not advertise half-duplex capability.
 */
static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	if (!pf->caps.half_duplex)
		return;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
736
/* Honor or ignore received PAUSE frames. The register bit is
 * "pause ignore", hence the inverted value.
 */
static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
749
/* Configure PAUSE frame transmission: congestion marking on the first
 * @num_rxbdr RX rings plus the PAUSE quanta and FIFO on/off
 * thresholds. Disabling zeroes the quanta and thresholds.
 */
static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
{
	u32 pause_off_thresh = 0, pause_on_thresh = 0;
	u32 init_quanta = 0, refresh_quanta = 0;
	struct enetc_hw *hw = &pf->si->hw;
	u32 rbmr, old_rbmr;
	int i;

	/* Toggle congestion marking per RX ring, skipping redundant writes */
	for (i = 0; i < num_rxbdr; i++) {
		old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
		rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
		if (rbmr == old_rbmr)
			continue;

		enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
	}

	if (tx_pause) {
		/* When the port first enters congestion, send a PAUSE request
		 * with the maximum number of quanta. When the port exits
		 * congestion, it will automatically send a PAUSE frame with
		 * zero quanta.
		 */
		init_quanta = 0xffff;

		/* Also, set up the refresh timer to send follow-up PAUSE
		 * frames at half the quanta value, in case the congestion
		 * condition persists.
		 */
		refresh_quanta = 0xffff / 2;

		/* Start emitting PAUSE frames when 3 large frames (or more
		 * smaller frames) have accumulated in the FIFO waiting to be
		 * DMAed to the RX ring.
		 */
		pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
		pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
	}

	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
	enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
	enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
}
794
/* Poll (100us interval, 10ms timeout) for the MAC's TX-empty event
 * bit; warn if the TX path fails to drain in time.
 */
static void enetc4_mac_wait_tx_empty(struct enetc_si *si, int mac)
{
	u32 val;

	if (read_poll_timeout(enetc_port_rd, val,
			      val & PM_IEVENT_TX_EMPTY,
			      100, 10000, false, &si->hw,
			      ENETC4_PM_IEVENT(mac)))
		dev_warn(&si->pdev->dev,
			 "MAC %d TX is not empty\n", mac);
}
806
/* Gracefully stop the TX path: first block new TX at the port level,
 * then wait for the MAC(s) to drain, and finally disable MAC TX.
 * Pseudo MACs only get the port-level disable.
 */
static void enetc4_mac_tx_graceful_stop(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_rd(hw, ENETC4_POR);
	val |= POR_TXDIS;
	enetc_port_wr(hw, ENETC4_POR, val);

	if (enetc_is_pseudo_mac(si))
		return;

	/* MAC 1 exists only when frame preemption (QBU) is supported */
	enetc4_mac_wait_tx_empty(si, 0);
	if (si->hw_features & ENETC_SI_F_QBU)
		enetc4_mac_wait_tx_empty(si, 1);

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val &= ~PM_CMD_CFG_TX_EN;
	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
828
/* Re-enable TX in the reverse order of the graceful stop: MAC TX
 * first, then clear the port-level TX disable.
 */
static void enetc4_mac_tx_enable(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val |= PM_CMD_CFG_TX_EN;
	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);

	val = enetc_port_rd(hw, ENETC4_POR);
	val &= ~POR_TXDIS;
	enetc_port_wr(hw, ENETC4_POR, val);
}
843
/* Poll (100us interval, 10ms timeout) for the MAC's RX-empty event
 * bit; warn if the RX path fails to drain in time.
 */
static void enetc4_mac_wait_rx_empty(struct enetc_si *si, int mac)
{
	u32 val;

	if (read_poll_timeout(enetc_port_rd, val,
			      val & PM_IEVENT_RX_EMPTY,
			      100, 10000, false, &si->hw,
			      ENETC4_PM_IEVENT(mac)))
		dev_warn(&si->pdev->dev,
			 "MAC %d RX is not empty\n", mac);
}
855
/* Gracefully stop the RX path: disable MAC RX (preemptible MAC first
 * when QBU is supported) and drain it, wait for the port RX logic to
 * go idle, then set the port-level RX disable. Pseudo MACs skip the
 * MAC stage entirely.
 */
static void enetc4_mac_rx_graceful_stop(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_si *si = pf->si;
	u32 val;

	if (enetc_is_pseudo_mac(si))
		goto check_rx_busy;

	if (si->hw_features & ENETC_SI_F_QBU) {
		val = enetc_port_rd(hw, ENETC4_PM_CMD_CFG(1));
		val &= ~PM_CMD_CFG_RX_EN;
		enetc_port_wr(hw, ENETC4_PM_CMD_CFG(1), val);
		enetc4_mac_wait_rx_empty(si, 1);
	}

	val = enetc_port_rd(hw, ENETC4_PM_CMD_CFG(0));
	val &= ~PM_CMD_CFG_RX_EN;
	enetc_port_wr(hw, ENETC4_PM_CMD_CFG(0), val);
	enetc4_mac_wait_rx_empty(si, 0);

check_rx_busy:
	/* Wait for in-flight RX processing at the port level to finish */
	if (read_poll_timeout(enetc_port_rd, val,
			      !(val & PSR_RX_BUSY),
			      100, 10000, false, hw,
			      ENETC4_PSR))
		dev_warn(&si->pdev->dev, "Port RX busy\n");

	val = enetc_port_rd(hw, ENETC4_POR);
	val |= POR_RXDIS;
	enetc_port_wr(hw, ENETC4_POR, val);
}
888
/* Re-enable RX in the reverse order of the graceful stop: clear the
 * port-level RX disable first, then enable MAC RX.
 */
static void enetc4_mac_rx_enable(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_rd(hw, ENETC4_POR);
	val &= ~POR_RXDIS;
	enetc_port_wr(hw, ENETC4_POR, val);

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val |= PM_CMD_CFG_RX_EN;
	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
903
/* phylink mac_link_up callback: program the resolved speed/duplex,
 * apply the PAUSE/flow-control policy, and turn the TX/RX paths on.
 */
static void enetc4_pl_mac_link_up(struct phylink_config *config,
				  struct phy_device *phy, unsigned int mode,
				  phy_interface_t interface, int speed,
				  int duplex, bool tx_pause, bool rx_pause)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);
	struct enetc_si *si = pf->si;
	struct enetc_ndev_priv *priv;
	bool hd_fc = false;

	priv = netdev_priv(si->ndev);
	enetc4_set_port_speed(priv, speed);

	/* In-band RGMII lets the hardware track speed/duplex itself */
	if (!phylink_autoneg_inband(mode) &&
	    phy_interface_mode_is_rgmii(interface))
		enetc4_set_rgmii_mac(pf, speed, duplex);

	if (interface == PHY_INTERFACE_MODE_RMII)
		enetc4_set_rmii_mac(pf, speed, duplex);

	if (duplex == DUPLEX_FULL) {
		/* When preemption is enabled, generation of PAUSE frames
		 * must be disabled, as stated in the IEEE 802.3 standard.
		 */
		if (priv->active_offloads & ENETC_F_QBU)
			tx_pause = false;
	} else { /* DUPLEX_HALF */
		if (tx_pause || rx_pause)
			hd_fc = true;

		/* As per 802.3 annex 31B, PAUSE frames are only supported
		 * when the link is configured for full duplex operation.
		 */
		tx_pause = false;
		rx_pause = false;
	}

	enetc4_set_hd_flow_control(pf, hd_fc);
	enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
	enetc4_set_rx_pause(pf, rx_pause);
	enetc4_mac_tx_enable(pf);
	enetc4_mac_rx_enable(pf);
}
947
/* phylink mac_link_down callback: drain and disable RX first, then TX */
static void enetc4_pl_mac_link_down(struct phylink_config *config,
				    unsigned int mode,
				    phy_interface_t interface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_mac_rx_graceful_stop(pf);
	enetc4_mac_tx_graceful_stop(pf);
}
957
/* phylink MAC operations for the ENETC v4 PF */
static const struct phylink_mac_ops enetc_pl_mac_ops = {
	.mac_select_pcs = enetc4_pl_mac_select_pcs,
	.mac_config = enetc4_pl_mac_config,
	.mac_link_up = enetc4_pl_mac_link_up,
	.mac_link_down = enetc4_pl_mac_link_down,
};
964
/* void-pointer wrapper around enetc_pci_remove(), suitable for use as
 * a generic cleanup callback taking opaque data.
 */
static void enetc4_pci_remove(void *data)
{
	enetc_pci_remove((struct pci_dev *)data);
}
971
/* Set up the link layer for the PF from device tree @node: PHY mode,
 * MDIO bus, and the phylink instance. On failure everything created
 * so far is torn down and a negative errno is returned.
 */
static int enetc4_link_init(struct enetc_ndev_priv *priv,
			    struct device_node *node)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	struct device *dev = priv->dev;
	int err;

	err = of_get_phy_mode(node, &pf->if_mode);
	if (err) {
		dev_err(dev, "Failed to get PHY mode\n");
		return err;
	}

	err = enetc_mdiobus_create(pf, node);
	if (err) {
		dev_err(dev, "Failed to create MDIO bus\n");
		return err;
	}

	err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
	if (err) {
		dev_err(dev, "Failed to create phylink\n");
		goto err_phylink_create;
	}

	return 0;

err_phylink_create:
	enetc_mdiobus_destroy(pf);

	return err;
}
1004
/* Counterpart of enetc4_link_init(): destroy phylink then the MDIO bus */
static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);

	enetc_phylink_destroy(priv);
	enetc_mdiobus_destroy(pf);
}
1012
/* Create the per-SI single-threaded workqueue used to defer rx_mode
 * updates, and hook up the work item. Returns 0 or -ENOMEM.
 */
static int enetc4_psi_wq_task_init(struct enetc_si *si)
{
	char wq_name[24];

	INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
	/* Name the workqueue after the PCI device for identification */
	snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
	si->workqueue = create_singlethread_workqueue(wq_name);
	if (!si->workqueue)
		return -ENOMEM;

	return 0;
}
1025
/* Allocate and register the PF netdev: etherdev allocation, optional
 * reference clock, SI configuration, MSI-X, link layer, rx_mode
 * workqueue, and finally netdev registration. Unwinds in reverse
 * order on failure via the goto ladder.
 */
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
	struct device *dev = &si->pdev->dev;
	struct enetc_ndev_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
				  si->num_tx_rings, si->num_rx_rings);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	/* Optional: absent clock yields NULL, which later clk calls accept */
	priv->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(priv->ref_clk)) {
		dev_err(dev, "Get reference clock failed\n");
		err = PTR_ERR(priv->ref_clk);
		goto err_clk_get;
	}

	enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);

	enetc_init_si_rings_params(priv);

	err = enetc_configure_si(priv);
	if (err) {
		dev_err(dev, "Failed to configure SI\n");
		goto err_config_si;
	}

	err = enetc_alloc_msix(priv);
	if (err) {
		dev_err(dev, "Failed to alloc MSI-X\n");
		goto err_alloc_msix;
	}

	err = enetc4_link_init(priv, dev->of_node);
	if (err)
		goto err_link_init;

	err = enetc4_psi_wq_task_init(si);
	if (err) {
		dev_err(dev, "Failed to init workqueue\n");
		goto err_wq_init;
	}

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
	destroy_workqueue(si->workqueue);
err_wq_init:
	enetc4_link_deinit(priv);
err_link_init:
	enetc_free_msix(priv);
err_alloc_msix:
err_config_si:
err_clk_get:
	free_netdev(ndev);

	return err;
}
1093
/* Tear down the net_device created by enetc4_pf_netdev_create().
 *
 * Unregisters the netdev first so no new rx-mode work is scheduled,
 * cancels any pending rx_mode_task and destroys its workqueue, then
 * releases the link (phylink + MDIO), MSI-X vectors and the netdev —
 * the exact reverse of creation order.
 */
static void enetc4_pf_netdev_destroy(struct enetc_si *si)
{
	struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
	struct net_device *ndev = si->ndev;

	unregister_netdev(ndev);
	cancel_work(&si->rx_mode_task);
	destroy_workqueue(si->workqueue);
	enetc4_link_deinit(priv);
	enetc_free_msix(priv);
	free_netdev(ndev);
}
1106
/* SI ops for the PF ("PSI") side: RSS indirection table accessors. */
static const struct enetc_si_ops enetc4_psi_ops = {
	.get_rss_table = enetc4_get_rss_table,
	.set_rss_table = enetc4_set_rss_table,
};
1111
/* PCI probe for the ENETC4 PF.
 *
 * Enables the PCI device (enetc_pci_probe allocates the SI private data
 * sized for struct enetc_pf), registers a devm action that undoes it on
 * any later failure, validates the register mappings, initialises the
 * PF structures and hardware, then creates the netdev and debugfs.
 *
 * Returns 0 on success or a negative errno.
 */
static int enetc4_pf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct enetc_si *si;
	struct enetc_pf *pf;
	int err;

	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
	if (err)
		return dev_err_probe(dev, err, "PCIe probing failed\n");

	/* From here on, enetc4_pci_remove() runs automatically on any
	 * error return (and at driver detach), via devm.
	 */
	err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
	if (err)
		return err;

	/* si is the private data. */
	si = pci_get_drvdata(pdev);
	/* The PF needs both the port and global register spaces mapped;
	 * a VF would lack them, and a failed BAR mapping leaves them NULL.
	 */
	if (!si->hw.port || !si->hw.global)
		return dev_err_probe(dev, -ENODEV,
				     "Couldn't map PF only space\n");

	si->revision = enetc_get_ip_revision(&si->hw);
	si->ops = &enetc4_psi_ops;
	err = enetc_get_driver_data(si);
	if (err)
		return dev_err_probe(dev, err,
				     "Could not get PF driver data\n");

	err = enetc4_pf_struct_init(si);
	if (err)
		return err;

	pf = enetc_si_priv(si);
	err = enetc4_pf_init(pf);
	if (err)
		return err;

	enetc_get_si_caps(si);

	err = enetc4_pf_netdev_create(si);
	if (err)
		goto err_netdev_create;

	enetc_create_debugfs(si);

	return 0;

err_netdev_create:
	enetc4_pf_free(pf);

	return err;
}
1165
/* PCI remove: unwind enetc4_pf_probe() in reverse order. The PCI-level
 * teardown itself (enetc4_pci_remove) runs afterwards via the devm
 * action registered at probe time.
 */
static void enetc4_pf_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_pf *pf = enetc_si_priv(si);

	enetc_remove_debugfs(si);
	enetc4_pf_netdev_destroy(si);
	enetc4_pf_free(pf);
}
1175
/* PCI IDs served by this driver: the plain ENETC PF and the
 * precision-timing (PPM) variant.
 */
static const struct pci_device_id enetc4_pf_id_table[] = {
	{ PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
	{ PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PPM_DEV_ID) },
	{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
1182
/* Driver registration; module_pci_driver() generates the module
 * init/exit boilerplate.
 */
static struct pci_driver enetc4_pf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = enetc4_pf_id_table,
	.probe = enetc4_pf_probe,
	.remove = enetc4_pf_remove,
};
module_pci_driver(enetc4_pf_driver);

MODULE_DESCRIPTION("ENETC4 PF Driver");
MODULE_LICENSE("Dual BSD/GPL");