// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/notifier.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

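/* Establish the driver default configuration: DMA burst/PBL and
 * outstanding-request settings, MTL store-and-forward and threshold
 * modes, and symmetric Tx/Rx pause with pause autonegotiation enabled.
 */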
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->blen = DMA_SBMR_BLEN_64;
	pdata->pbl = DMA_PBL_128;
	pdata->aal = 1;
	pdata->rd_osr_limit = 8;
	pdata->wr_osr_limit = 8;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

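/* Populate the device, PHY, I2C and descriptor function pointer tables,
 * then let the version-specific data install its PHY implementation.
 */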
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

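/* Allocate a multi-queue net_device with the private data embedded in
 * it, initialize the locks, mutexes and completions used by the driver,
 * and mark the device down and stopped.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */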
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev_mq failed\n");
		return ERR_PTR(-ENOMEM);
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->dev = dev;

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);
	mutex_init(&pdata->i2c_mutex);
	init_completion(&pdata->i2c_complete);
	init_completion(&pdata->mdio_complete);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);
	set_bit(XGBE_STOPPED, &pdata->dev_state);

	return pdata;
}

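/* Release the net_device; the private data is embedded in it and is
 * freed along with it.
 */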
void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	free_netdev(netdev);
}

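/* Determine the number of Tx/Rx DMA channels and hardware queues to
 * use, capped by the hardware feature counts, the number of online CPUs
 * and any maximums provided by the platform or PCI front-end.
 */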
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
			pdata->tx_ring_count, pdata->rx_ring_count);
		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
			pdata->tx_q_count, pdata->rx_q_count);
	}
}

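/* Complete device setup: reset the hardware, apply the default
 * configuration, set the DMA mask, initialize the PHY, fill in the
 * net_device ops and feature flags, and register the net_device along
 * with its PTP clock and debugfs entries.
 */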
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct device *dev = pdata->dev;
	int ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	eth_hw_addr_set(netdev, pdata->mac_addr);

	/* Initialize ECC timestamps */
	pdata->tx_sec_period = jiffies;
	pdata->tx_ded_period = jiffies;
	pdata->rx_sec_period = jiffies;
	pdata->rx_ded_period = jiffies;
	pdata->desc_sec_period = jiffies;
	pdata->desc_ded_period = jiffies;

	/* Issue software reset to device */
	ret = pdata->hw_if.exit(pdata);
	if (ret) {
		dev_err(dev, "software reset failed\n");
		return ret;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		return ret;
	}

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;

	/* Adjust the number of queues based on interrupts assigned */
	if (pdata->channel_irq_count) {
		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
					     pdata->channel_irq_count);
		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
					     pdata->channel_irq_count);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev,
				"adjusted TX/RX DMA channel count = %u/%u\n",
				pdata->tx_ring_count, pdata->rx_ring_count);
	}

	/* Initialize RSS hash key */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
	ret = pdata->phy_if.phy_init(pdata);
	if (ret)
		return ret;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	if (pdata->hw_feat.vxn) {
		netdev->hw_enc_features = NETIF_F_SG |
					  NETIF_F_IP_CSUM |
					  NETIF_F_IPV6_CSUM |
					  NETIF_F_RXCSUM |
					  NETIF_F_TSO |
					  NETIF_F_TSO6 |
					  NETIF_F_GRO |
					  NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info();
	}

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->min_mtu = 0;
	netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		return ret;
	}

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
		  pdata->tx_ring_count);
	netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
		  pdata->rx_ring_count);

	return 0;
}

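/* Tear down in the reverse order of xgbe_config_netdev(): debugfs, PTP
 * clock, net_device registration and finally the PHY.
 */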
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	xgbe_debugfs_exit(pdata);

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_unregister(pdata);

	unregister_netdev(netdev);

	pdata->phy_if.phy_exit(pdata);
}

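/* Netdevice notifier callback.  Only devices bound to this driver are
 * handled (identified by their netdev_ops); a name change triggers a
 * rename of the corresponding debugfs directory.
 */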
static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(data);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	if (netdev->netdev_ops != xgbe_get_netdev_ops())
		goto out;

	switch (event) {
	case NETDEV_CHANGENAME:
		xgbe_debugfs_rename(pdata);
		break;

	default:
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block xgbe_netdev_notifier = {
	.notifier_call = xgbe_netdev_event,
};

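/* Module init: register the netdevice notifier and then the platform
 * and PCI front-ends, unwinding on failure.
 */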
static int __init xgbe_mod_init(void)
{
	int ret;

	ret = register_netdevice_notifier(&xgbe_netdev_notifier);
	if (ret)
		return ret;

	ret = xgbe_platform_init();
	if (ret)
		goto err_platform_init;

	ret = xgbe_pci_init();
	if (ret)
		goto err_pci_init;

	return 0;

err_pci_init:
	xgbe_platform_exit();
err_platform_init:
	unregister_netdevice_notifier(&xgbe_netdev_notifier);
	return ret;
}

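/* Module exit: unregister the front-ends and the notifier in reverse
 * order of xgbe_mod_init().
 */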
static void __exit xgbe_mod_exit(void)
{
	xgbe_pci_exit();

	xgbe_platform_exit();

	unregister_netdevice_notifier(&xgbe_netdev_notifier);
}

module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);