xref: /linux/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/pci.h>
6 
7 #include "wx_type.h"
8 #include "wx_mbx.h"
9 #include "wx_lib.h"
10 #include "wx_vf.h"
11 #include "wx_vf_lib.h"
12 #include "wx_vf_common.h"
13 
wxvf_suspend(struct device * dev_d)14 int wxvf_suspend(struct device *dev_d)
15 {
16 	struct pci_dev *pdev = to_pci_dev(dev_d);
17 	struct wx *wx = pci_get_drvdata(pdev);
18 
19 	netif_device_detach(wx->netdev);
20 	wx_clear_interrupt_scheme(wx);
21 	pci_disable_device(pdev);
22 
23 	return 0;
24 }
25 EXPORT_SYMBOL(wxvf_suspend);
26 
/* PCI shutdown callback: reuse the suspend path to quiesce the device. */
void wxvf_shutdown(struct pci_dev *pdev)
{
	wxvf_suspend(&pdev->dev);
}
EXPORT_SYMBOL(wxvf_shutdown);
32 
wxvf_resume(struct device * dev_d)33 int wxvf_resume(struct device *dev_d)
34 {
35 	struct pci_dev *pdev = to_pci_dev(dev_d);
36 	struct wx *wx = pci_get_drvdata(pdev);
37 
38 	pci_set_master(pdev);
39 	wx_init_interrupt_scheme(wx);
40 	netif_device_attach(wx->netdev);
41 
42 	return 0;
43 }
44 EXPORT_SYMBOL(wxvf_resume);
45 
/* PCI remove callback: stop the service task, unregister the netdev and
 * release every software and PCI resource acquired at probe time.
 */
void wxvf_remove(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;

	/* make sure no service work is in flight before tearing down */
	cancel_work_sync(&wx->service_task);
	netdev = wx->netdev;
	unregister_netdev(netdev);
	/* free bookkeeping allocated at probe time */
	kfree(wx->vfinfo);
	kfree(wx->rss_key);
	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
	pci_disable_device(pdev);
}
EXPORT_SYMBOL(wxvf_remove);
63 
/* Misc (mailbox/link) MSI-X handler: flag that the link state must be
 * re-read by the service task, then re-arm the vector.
 */
static irqreturn_t wx_msix_misc_vf(int __always_unused irq, void *data)
{
	struct wx *wx = data;

	set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
	/* Clear the interrupt */
	if (netif_running(wx->netdev))
		wr32(wx, WX_VXIMC, wx->eims_other);

	return IRQ_HANDLED;
}
75 
wx_request_msix_irqs_vf(struct wx * wx)76 int wx_request_msix_irqs_vf(struct wx *wx)
77 {
78 	struct net_device *netdev = wx->netdev;
79 	int vector, err;
80 
81 	for (vector = 0; vector < wx->num_q_vectors; vector++) {
82 		struct wx_q_vector *q_vector = wx->q_vector[vector];
83 		struct msix_entry *entry = &wx->msix_q_entries[vector];
84 
85 		if (q_vector->tx.ring && q_vector->rx.ring)
86 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
87 				 "%s-TxRx-%d", netdev->name, entry->entry);
88 		else
89 			/* skip this unused q_vector */
90 			continue;
91 
92 		err = request_irq(entry->vector, wx_msix_clean_rings, 0,
93 				  q_vector->name, q_vector);
94 		if (err) {
95 			wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
96 			       q_vector->name, err);
97 			goto free_queue_irqs;
98 		}
99 	}
100 
101 	err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
102 				   NULL, IRQF_ONESHOT, netdev->name, wx);
103 	if (err) {
104 		wx_err(wx, "request_irq for msix_other failed: %d\n", err);
105 		goto free_queue_irqs;
106 	}
107 
108 	return 0;
109 
110 free_queue_irqs:
111 	while (vector) {
112 		vector--;
113 		free_irq(wx->msix_q_entries[vector].vector,
114 			 wx->q_vector[vector]);
115 	}
116 	wx_reset_interrupt_capability(wx);
117 	return err;
118 }
119 EXPORT_SYMBOL(wx_request_msix_irqs_vf);
120 
wx_negotiate_api_vf(struct wx * wx)121 void wx_negotiate_api_vf(struct wx *wx)
122 {
123 	int api[] = {
124 		     wx_mbox_api_13,
125 		     wx_mbox_api_null};
126 	int err = 0, idx = 0;
127 
128 	spin_lock_bh(&wx->mbx.mbx_lock);
129 	while (api[idx] != wx_mbox_api_null) {
130 		err = wx_negotiate_api_version(wx, api[idx]);
131 		if (!err)
132 			break;
133 		idx++;
134 	}
135 	spin_unlock_bh(&wx->mbx.mbx_lock);
136 }
137 EXPORT_SYMBOL(wx_negotiate_api_vf);
138 
wx_reset_vf(struct wx * wx)139 void wx_reset_vf(struct wx *wx)
140 {
141 	struct net_device *netdev = wx->netdev;
142 	int ret = 0;
143 
144 	ret = wx_reset_hw_vf(wx);
145 	if (!ret)
146 		wx_init_hw_vf(wx);
147 	wx_negotiate_api_vf(wx);
148 	if (is_valid_ether_addr(wx->mac.addr)) {
149 		eth_hw_addr_set(netdev, wx->mac.addr);
150 		ether_addr_copy(netdev->perm_addr, wx->mac.addr);
151 	}
152 }
153 EXPORT_SYMBOL(wx_reset_vf);
154 
wx_set_rx_mode_vf(struct net_device * netdev)155 void wx_set_rx_mode_vf(struct net_device *netdev)
156 {
157 	struct wx *wx = netdev_priv(netdev);
158 	unsigned int flags = netdev->flags;
159 	int xcast_mode;
160 
161 	xcast_mode = (flags & IFF_ALLMULTI) ? WXVF_XCAST_MODE_ALLMULTI :
162 		     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
163 		     WXVF_XCAST_MODE_MULTI : WXVF_XCAST_MODE_NONE;
164 	/* request the most inclusive mode we need */
165 	if (flags & IFF_PROMISC)
166 		xcast_mode = WXVF_XCAST_MODE_PROMISC;
167 	else if (flags & IFF_ALLMULTI)
168 		xcast_mode = WXVF_XCAST_MODE_ALLMULTI;
169 	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
170 		xcast_mode = WXVF_XCAST_MODE_MULTI;
171 	else
172 		xcast_mode = WXVF_XCAST_MODE_NONE;
173 
174 	spin_lock_bh(&wx->mbx.mbx_lock);
175 	wx_update_xcast_mode_vf(wx, xcast_mode);
176 	wx_update_mc_addr_list_vf(wx, netdev);
177 	wx_write_uc_addr_list_vf(netdev);
178 	spin_unlock_bh(&wx->mbx.mbx_lock);
179 }
180 EXPORT_SYMBOL(wx_set_rx_mode_vf);
181 
182 /**
183  * wx_configure_rx_vf - Configure Receive Unit after Reset
184  * @wx: board private structure
185  *
186  * Configure the Rx unit of the MAC after a reset.
187  **/
wx_configure_rx_vf(struct wx * wx)188 static void wx_configure_rx_vf(struct wx *wx)
189 {
190 	struct net_device *netdev = wx->netdev;
191 	int i, ret;
192 
193 	wx_setup_psrtype_vf(wx);
194 	wx_setup_vfmrqc_vf(wx);
195 
196 	spin_lock_bh(&wx->mbx.mbx_lock);
197 	ret = wx_rlpml_set_vf(wx,
198 			      netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
199 	spin_unlock_bh(&wx->mbx.mbx_lock);
200 	if (ret)
201 		wx_dbg(wx, "Failed to set MTU at %d\n", netdev->mtu);
202 
203 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
204 	 * the Base and Length of the Rx Descriptor Ring
205 	 */
206 	for (i = 0; i < wx->num_rx_queues; i++) {
207 		struct wx_ring *rx_ring = wx->rx_ring[i];
208 #ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
209 		wx_set_rx_buffer_len_vf(wx, rx_ring);
210 #endif
211 		wx_configure_rx_ring_vf(wx, rx_ring);
212 	}
213 }
214 
/* Bring the VF data path to a configured state: Rx filtering first,
 * then the Tx and Rx units.
 */
void wx_configure_vf(struct wx *wx)
{
	wx_set_rx_mode_vf(wx->netdev);
	wx_configure_tx_vf(wx);
	wx_configure_rx_vf(wx);
}
EXPORT_SYMBOL(wx_configure_vf);
222 
wx_set_mac_vf(struct net_device * netdev,void * p)223 int wx_set_mac_vf(struct net_device *netdev, void *p)
224 {
225 	struct wx *wx = netdev_priv(netdev);
226 	struct sockaddr *addr = p;
227 	int ret;
228 
229 	ret = eth_prepare_mac_addr_change(netdev, addr);
230 	if (ret)
231 		return ret;
232 
233 	spin_lock_bh(&wx->mbx.mbx_lock);
234 	ret = wx_set_rar_vf(wx, 1, (u8 *)addr->sa_data, 1);
235 	spin_unlock_bh(&wx->mbx.mbx_lock);
236 
237 	if (ret)
238 		return -EPERM;
239 
240 	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
241 	memcpy(wx->mac.perm_addr, addr->sa_data, netdev->addr_len);
242 	eth_hw_addr_set(netdev, addr->sa_data);
243 
244 	return 0;
245 }
246 EXPORT_SYMBOL(wx_set_mac_vf);
247 
/**
 * wxvf_watchdog_update_link - refresh the cached link state if requested
 * @wx: board private structure
 *
 * Runs from the service task.  Only does work when
 * WX_FLAG_NEED_UPDATE_LINK has been set (e.g. by the misc IRQ handler).
 * On a mailbox failure the link is marked down and a reset is scheduled.
 */
void wxvf_watchdog_update_link(struct wx *wx)
{
	int err;

	if (!test_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags))
		return;

	/* serialize the mailbox query against other mailbox users */
	spin_lock_bh(&wx->mbx.mbx_lock);
	err = wx_check_mac_link_vf(wx);
	spin_unlock_bh(&wx->mbx.mbx_lock);
	if (err) {
		wx->link = false;
		set_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
	}
	/* cleared after the query so a new IRQ re-triggers a re-check */
	clear_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
}
EXPORT_SYMBOL(wxvf_watchdog_update_link);
265 
/* Unmask every interrupt vector the driver currently uses. */
static void wxvf_irq_enable(struct wx *wx)
{
	wr32(wx, WX_VXIMC, wx->eims_enable_mask);
}
270 
/* Final stage of bringing the interface up: arm the service timer,
 * enable NAPI and interrupts, then open the Tx queues.
 */
static void wxvf_up_complete(struct wx *wx)
{
	/* Always set the carrier off */
	netif_carrier_off(wx->netdev);
	mod_timer(&wx->service_timer, jiffies + HZ);
	/* force the service task to (re)evaluate the link state */
	set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);

	wx_configure_msix_vf(wx);
	/* NOTE(review): barrier presumably orders the setup above before
	 * NAPI's atomic state changes — confirm the intended pairing
	 */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);

	/* clear any pending interrupts, may auto mask */
	wr32(wx, WX_VXICR, U32_MAX);
	wxvf_irq_enable(wx);
	/* enable transmits */
	netif_tx_start_all_queues(wx->netdev);
}
288 
wxvf_open(struct net_device * netdev)289 int wxvf_open(struct net_device *netdev)
290 {
291 	struct wx *wx = netdev_priv(netdev);
292 	int err;
293 
294 	err = wx_setup_resources(wx);
295 	if (err)
296 		goto err_reset;
297 	wx_configure_vf(wx);
298 
299 	err = wx_request_msix_irqs_vf(wx);
300 	if (err)
301 		goto err_free_resources;
302 
303 	/* Notify the stack of the actual queue counts. */
304 	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
305 	if (err)
306 		goto err_free_irq;
307 
308 	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
309 	if (err)
310 		goto err_free_irq;
311 
312 	wxvf_up_complete(wx);
313 
314 	return 0;
315 err_free_irq:
316 	wx_free_irq(wx);
317 err_free_resources:
318 	wx_free_resources(wx);
319 err_reset:
320 	wx_reset_vf(wx);
321 	return err;
322 }
323 EXPORT_SYMBOL(wxvf_open);
324 
/* Take the interface down: stop the service timer and Tx queues,
 * disable NAPI, reset the hardware and drop stale descriptors.
 */
static void wxvf_down(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;

	timer_delete_sync(&wx->service_timer);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);
	netif_carrier_off(netdev);
	wx_napi_disable_all(wx);
	wx_reset_vf(wx);

	/* rings are stale after the reset above */
	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}
339 
/* Full down/up cycle under WX_STATE_RESETTING; sleeps until it owns the
 * resetting bit.  Called under rtnl_lock() from the reset subtask.
 *
 * NOTE(review): the return value of wx_request_msix_irqs_vf() is
 * ignored here — a failed IRQ request would leave the device up with no
 * interrupts; worth confirming whether that can happen in practice.
 */
static void wxvf_reinit_locked(struct wx *wx)
{
	while (test_and_set_bit(WX_STATE_RESETTING, wx->state))
		usleep_range(1000, 2000);
	wxvf_down(wx);
	wx_free_irq(wx);
	wx_configure_vf(wx);
	wx_request_msix_irqs_vf(wx);
	wxvf_up_complete(wx);
	clear_bit(WX_STATE_RESETTING, wx->state);
}
351 
wxvf_reset_subtask(struct wx * wx)352 static void wxvf_reset_subtask(struct wx *wx)
353 {
354 	if (!test_bit(WX_FLAG_NEED_DO_RESET, wx->flags))
355 		return;
356 	clear_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
357 
358 	rtnl_lock();
359 	if (test_bit(WX_STATE_RESETTING, wx->state) ||
360 	    !(netif_running(wx->netdev))) {
361 		rtnl_unlock();
362 		return;
363 	}
364 	wxvf_reinit_locked(wx);
365 	rtnl_unlock();
366 }
367 
/**
 * wxvf_close - take the VF interface down
 * @netdev: network interface device structure
 *
 * Stop the interface, then release its IRQs and ring resources.
 *
 * Return: always 0.
 */
int wxvf_close(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	wxvf_down(wx);
	wx_free_irq(wx);
	wx_free_resources(wx);

	return 0;
}
EXPORT_SYMBOL(wxvf_close);
379 
wxvf_link_config_subtask(struct wx * wx)380 static void wxvf_link_config_subtask(struct wx *wx)
381 {
382 	struct net_device *netdev = wx->netdev;
383 
384 	wxvf_watchdog_update_link(wx);
385 	if (wx->link) {
386 		if (netif_carrier_ok(netdev))
387 			return;
388 		netif_carrier_on(netdev);
389 		netdev_info(netdev, "Link is Up - %s\n",
390 			    phy_speed_to_str(wx->speed));
391 	} else {
392 		if (!netif_carrier_ok(netdev))
393 			return;
394 		netif_carrier_off(netdev);
395 		netdev_info(netdev, "Link is Down\n");
396 	}
397 }
398 
/* Service task: refresh the link state, run any pending reset, then
 * mark the service event complete.
 */
static void wxvf_service_task(struct work_struct *work)
{
	struct wx *wx = container_of(work, struct wx, service_task);

	wxvf_link_config_subtask(wx);
	wxvf_reset_subtask(wx);
	wx_service_event_complete(wx);
}
407 
/* Initialize the service timer and work item used for deferred
 * link/reset handling.
 */
void wxvf_init_service(struct wx *wx)
{
	timer_setup(&wx->service_timer, wx_service_timer, 0);
	INIT_WORK(&wx->service_task, wxvf_service_task);
	clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
}
EXPORT_SYMBOL(wxvf_init_service);
415