1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
3
4 #include <linux/types.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/string.h>
9 #include <linux/etherdevice.h>
10
11 #include "../libwx/wx_type.h"
12 #include "../libwx/wx_hw.h"
13 #include "../libwx/wx_lib.h"
14 #include "../libwx/wx_mbx.h"
15 #include "../libwx/wx_vf.h"
16 #include "../libwx/wx_vf_common.h"
17 #include "txgbevf_type.h"
18
19 /* txgbevf_pci_tbl - PCI Device ID Table
20 *
21 * Wildcard entries (PCI_ANY_ID) should come last
22 * Last entry must be all 0s
23 *
24 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
25 * Class, Class Mask, private data (not used) }
26 */
static const struct pci_device_id txgbevf_pci_tbl[] = {
	/* Sapphire (SP) family VFs */
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_SP1000), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_WX1820), 0},
	/* Amber-Lite (AML) family VFs */
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML500F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML510F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5024), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5124), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML503F), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML513F), 0},
	/* required last entry */
	{ .device = 0 }
};
39
/* net_device callbacks; open/close/mac handling are shared libwx VF helpers */
static const struct net_device_ops txgbevf_netdev_ops = {
	.ndo_open               = wxvf_open,
	.ndo_stop               = wxvf_close,
	.ndo_start_xmit         = wx_xmit_frame,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = wx_set_mac_vf,
};
47
txgbevf_set_num_queues(struct wx * wx)48 static void txgbevf_set_num_queues(struct wx *wx)
49 {
50 u32 def_q = 0, num_tcs = 0;
51 u16 rss, queue;
52 int ret = 0;
53
54 /* Start with base case */
55 wx->num_rx_queues = 1;
56 wx->num_tx_queues = 1;
57
58 spin_lock_bh(&wx->mbx.mbx_lock);
59 /* fetch queue configuration from the PF */
60 ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
61 spin_unlock_bh(&wx->mbx.mbx_lock);
62
63 if (ret)
64 return;
65
66 /* we need as many queues as traffic classes */
67 if (num_tcs > 1) {
68 wx->num_rx_queues = num_tcs;
69 } else {
70 rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
71 queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
72 rss = min_t(u16, queue, rss);
73
74 if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
75 wx->num_rx_queues = rss;
76 wx->num_tx_queues = rss;
77 }
78 }
79 }
80
txgbevf_init_type_code(struct wx * wx)81 static void txgbevf_init_type_code(struct wx *wx)
82 {
83 switch (wx->device_id) {
84 case TXGBEVF_DEV_ID_SP1000:
85 case TXGBEVF_DEV_ID_WX1820:
86 wx->mac.type = wx_mac_sp;
87 break;
88 case TXGBEVF_DEV_ID_AML500F:
89 case TXGBEVF_DEV_ID_AML510F:
90 case TXGBEVF_DEV_ID_AML5024:
91 case TXGBEVF_DEV_ID_AML5124:
92 case TXGBEVF_DEV_ID_AML503F:
93 case TXGBEVF_DEV_ID_AML513F:
94 wx->mac.type = wx_mac_aml;
95 break;
96 default:
97 wx->mac.type = wx_mac_unknown;
98 break;
99 }
100 }
101
/* txgbevf_sw_init - initialize the driver-private structure
 * @wx: board private structure
 *
 * Sets up common capabilities, the PF mailbox, MAC type, the MAC address
 * (falling back to a random one if the PF assigned none), and the default
 * ring/ITR/work-limit parameters.
 *
 * Return: 0 on success, negative errno on failure.  On failure, everything
 * allocated here (vfinfo, rss_key, mac_table) is freed before returning.
 */
static int txgbevf_sw_init(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	struct pci_dev *pdev = wx->pdev;
	int err;

	/* Initialize pcie info and common capability flags */
	err = wx_sw_init(wx);
	if (err < 0)
		goto err_wx_sw_init;

	/* Initialize the mailbox */
	err = wx_init_mbx_params_vf(wx);
	if (err)
		goto err_init_mbx_params;

	/* max q_vectors */
	wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
	/* Initialize the device type */
	txgbevf_init_type_code(wx);
	/* lock to protect mailbox accesses */
	spin_lock_init(&wx->mbx.mbx_lock);

	/* a failed reset usually means the PF has not been brought up yet */
	err = wx_reset_hw_vf(wx);
	if (err) {
		wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
		goto err_reset_hw;
	}
	wx_init_hw_vf(wx);
	/* agree on a mailbox API version with the PF */
	wx_negotiate_api_vf(wx);
	if (is_zero_ether_addr(wx->mac.addr))
		dev_info(&pdev->dev,
			 "MAC address not assigned by administrator.\n");
	eth_hw_addr_set(netdev, wx->mac.addr);

	/* PF gave us no usable address: generate one and mirror it back
	 * into the driver-private MAC fields
	 */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(wx->mac.addr, netdev->dev_addr);
		ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
	}

	wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
	/* Enable dynamic interrupt throttling rates */
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;
	/* set default ring sizes */
	wx->tx_ring_count = TXGBEVF_DEFAULT_TXD;
	wx->rx_ring_count = TXGBEVF_DEFAULT_RXD;
	/* set default work limits */
	wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;

	wx->set_num_queues = txgbevf_set_num_queues;

	return 0;
	/* unwind in reverse order of allocation: vfinfo was allocated by
	 * wx_init_mbx_params_vf(), rss_key/mac_table by wx_sw_init()
	 */
err_reset_hw:
	kfree(wx->vfinfo);
err_init_mbx_params:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_wx_sw_init:
	return err;
}
167
168 /**
169 * txgbevf_probe - Device Initialization Routine
170 * @pdev: PCI device information struct
171 * @ent: entry in txgbevf_pci_tbl
172 *
173 * Return: return 0 on success, negative on failure
174 *
175 * txgbevf_probe initializes an adapter identified by a pci_dev structure.
176 * The OS initialization, configuring of the adapter private structure,
177 * and a hardware reset occur.
178 **/
txgbevf_probe(struct pci_dev * pdev,const struct pci_device_id __always_unused * ent)179 static int txgbevf_probe(struct pci_dev *pdev,
180 const struct pci_device_id __always_unused *ent)
181 {
182 struct net_device *netdev;
183 struct wx *wx = NULL;
184 int err;
185
186 err = pci_enable_device_mem(pdev);
187 if (err)
188 return err;
189
190 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
191 if (err) {
192 dev_err(&pdev->dev,
193 "No usable DMA configuration, aborting\n");
194 goto err_pci_disable_dev;
195 }
196
197 err = pci_request_selected_regions(pdev,
198 pci_select_bars(pdev, IORESOURCE_MEM),
199 dev_driver_string(&pdev->dev));
200 if (err) {
201 dev_err(&pdev->dev,
202 "pci_request_selected_regions failed 0x%x\n", err);
203 goto err_pci_disable_dev;
204 }
205
206 pci_set_master(pdev);
207
208 netdev = devm_alloc_etherdev_mqs(&pdev->dev,
209 sizeof(struct wx),
210 TXGBEVF_MAX_TX_QUEUES,
211 TXGBEVF_MAX_RX_QUEUES);
212 if (!netdev) {
213 err = -ENOMEM;
214 goto err_pci_release_regions;
215 }
216
217 SET_NETDEV_DEV(netdev, &pdev->dev);
218
219 wx = netdev_priv(netdev);
220 wx->netdev = netdev;
221 wx->pdev = pdev;
222
223 wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
224 NETIF_MSG_PROBE | NETIF_MSG_LINK);
225 wx->hw_addr = devm_ioremap(&pdev->dev,
226 pci_resource_start(pdev, 0),
227 pci_resource_len(pdev, 0));
228 if (!wx->hw_addr) {
229 err = -EIO;
230 goto err_pci_release_regions;
231 }
232
233 wx->b4_addr = devm_ioremap(&pdev->dev,
234 pci_resource_start(pdev, 4),
235 pci_resource_len(pdev, 4));
236 if (!wx->b4_addr) {
237 err = -EIO;
238 goto err_pci_release_regions;
239 }
240
241 netdev->netdev_ops = &txgbevf_netdev_ops;
242
243 /* setup the private structure */
244 err = txgbevf_sw_init(wx);
245 if (err)
246 goto err_pci_release_regions;
247
248 netdev->features |= NETIF_F_HIGHDMA;
249
250 eth_hw_addr_set(netdev, wx->mac.perm_addr);
251 ether_addr_copy(netdev->perm_addr, wx->mac.addr);
252
253 wxvf_init_service(wx);
254 err = wx_init_interrupt_scheme(wx);
255 if (err)
256 goto err_free_sw_init;
257
258 err = register_netdev(netdev);
259 if (err)
260 goto err_register;
261
262 pci_set_drvdata(pdev, wx);
263 netif_tx_stop_all_queues(netdev);
264
265 return 0;
266
267 err_register:
268 wx_clear_interrupt_scheme(wx);
269 err_free_sw_init:
270 timer_delete_sync(&wx->service_timer);
271 cancel_work_sync(&wx->service_task);
272 kfree(wx->vfinfo);
273 kfree(wx->rss_key);
274 kfree(wx->mac_table);
275 err_pci_release_regions:
276 pci_release_selected_regions(pdev,
277 pci_select_bars(pdev, IORESOURCE_MEM));
278 err_pci_disable_dev:
279 pci_disable_device(pdev);
280 return err;
281 }
282
283 /**
284 * txgbevf_remove - Device Removal Routine
285 * @pdev: PCI device information struct
286 *
287 * txgbevf_remove is called by the PCI subsystem to alert the driver
288 * that it should release a PCI device. The could be caused by a
289 * Hot-Plug event, or because the driver is going to be removed from
290 * memory.
291 **/
txgbevf_remove(struct pci_dev * pdev)292 static void txgbevf_remove(struct pci_dev *pdev)
293 {
294 wxvf_remove(pdev);
295 }
296
/* suspend/resume handlers are the shared libwx VF implementations */
static DEFINE_SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, wxvf_suspend, wxvf_resume);

static struct pci_driver txgbevf_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = txgbevf_pci_tbl,
	.probe    = txgbevf_probe,
	.remove   = txgbevf_remove,
	.shutdown = wxvf_shutdown,
	/* Power Management Hooks */
	.driver.pm = pm_sleep_ptr(&txgbevf_pm_ops)
};
308
/* register/unregister the driver at module load/unload */
module_pci_driver(txgbevf_driver);

MODULE_DEVICE_TABLE(pci, txgbevf_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
315