// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/ethtool.h>

#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
#include "wx_lib.h"

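/* Each wx_stats entry pairs the string exposed through ethtool -S with the
 * size and offset of the backing counter in struct wx, so the counters can
 * be copied out generically in wx_get_ethtool_stats().
 */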
struct wx_stats {
	char stat_string[ETH_GSTRING_LEN];
	size_t sizeof_stat;
	off_t stat_offset;
};

#define WX_STAT(str, m) { \
		.stat_string = str, \
		.sizeof_stat = sizeof(((struct wx *)0)->m), \
		.stat_offset = offsetof(struct wx, m) }

static const struct wx_stats wx_gstrings_stats[] = {
	WX_STAT("rx_dma_pkts", stats.gprc),
	WX_STAT("tx_dma_pkts", stats.gptc),
	WX_STAT("rx_dma_bytes", stats.gorc),
	WX_STAT("tx_dma_bytes", stats.gotc),
	WX_STAT("rx_total_pkts", stats.tpr),
	WX_STAT("tx_total_pkts", stats.tpt),
	WX_STAT("rx_long_length_count", stats.roc),
	WX_STAT("rx_short_length_count", stats.ruc),
	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
	WX_STAT("tx_busy", tx_busy),
	WX_STAT("non_eop_descs", non_eop_descs),
	WX_STAT("tx_restart_queue", restart_queue),
	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
	WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};

static const struct wx_stats wx_gstrings_fdir_stats[] = {
	WX_STAT("fdir_match", stats.fdirmatch),
	WX_STAT("fdir_miss", stats.fdirmiss),
};

/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
 * define the number of Rx queues to evaluate to num_tx_queues. This is
 * used because there is no good way to get the maximum number of Rx queues
 * with CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)

int wx_get_sset_count(struct net_device *netdev, int sset)
{
	struct wx *wx = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
			WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(wx_get_sset_count);

void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct wx *wx = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
		if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
			for (i = 0; i < WX_FDIR_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		break;
	}
}
EXPORT_SYMBOL(wx_get_strings);

void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_ring *ring;
	unsigned int start;
	int i, j, k;
	char *p;

	wx_update_stats(wx);

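	/* Copy each global counter out of struct wx using the offset and
	 * size recorded in the stats table; fields are either u64 or u32.
	 */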
	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = wx->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

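		/* Snapshot the per-ring counters under the u64_stats
		 * seqcount so packet and byte counts stay consistent on
		 * 32-bit systems.
		 */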
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
		ring = wx->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
}
EXPORT_SYMBOL(wx_get_ethtool_stats);

void wx_get_mac_stats(struct net_device *netdev,
		      struct ethtool_eth_mac_stats *mac_stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
}
EXPORT_SYMBOL(wx_get_mac_stats);

void wx_get_pause_stats(struct net_device *netdev,
			struct ethtool_pause_stats *stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	stats->rx_pause_frames = hwstats->lxonoffrxc;
}
EXPORT_SYMBOL(wx_get_pause_stats);

void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	unsigned int stats_len = WX_STATS_LEN;
	struct wx *wx = netdev_priv(netdev);

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		stats_len += WX_FDIR_STATS_LEN;

	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
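	/* Trim the reported stats count when fewer Tx/Rx queues are in use
	 * than the netdev was registered with, since WX_QUEUE_STATS_LEN is
	 * computed from netdev->num_tx_queues.
	 */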
	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
		info->n_stats = stats_len -
				(WX_NUM_TX_QUEUES - wx->num_tx_queues) *
				(sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
	} else {
		info->n_stats = stats_len;
	}
}
EXPORT_SYMBOL(wx_get_drvinfo);

int wx_nway_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml)
		return -EOPNOTSUPP;

	return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);

int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);

int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);

void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml)
		return;

	phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);

int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml)
		return -EOPNOTSUPP;

	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);

void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ring->rx_max_pending = WX_MAX_RXD;
	ring->tx_max_pending = WX_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = wx->rx_ring_count;
	ring->tx_pending = wx->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);

int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
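	/* ITR settings of 0 and 1 are reported unchanged; larger values
	 * store the interval in microseconds shifted left by two, so shift
	 * back for reporting.
	 */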
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);

int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq)
		wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	switch (wx->mac.type) {
	case wx_mac_sp:
		max_eitr = WX_SP_MAX_EITR;
		break;
	case wx_mac_aml:
		max_eitr = WX_AML_MAX_EITR;
		break;
	default:
		max_eitr = WX_EM_MAX_EITR;
		break;
	}

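	/* The requested microsecond values are stored shifted left by two,
	 * so validate them against a quarter of the per-MAC EITR maximum.
	 */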
	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (wx->rx_itr_setting == 1)
		rx_itr_param = WX_20K_ITR;
	else
		rx_itr_param = wx->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	if (wx->tx_itr_setting == 1) {
		switch (wx->mac.type) {
		case wx_mac_sp:
		case wx_mac_aml:
			tx_itr_param = WX_12K_ITR;
			break;
		default:
			tx_itr_param = WX_20K_ITR;
			break;
		}
	} else {
		tx_itr_param = wx->tx_itr_setting;
	}

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);

static unsigned int wx_max_channels(struct wx *wx)
{
	unsigned int max_combined;

	if (!wx->msix_q_entries) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else {
		/* support up to max allowed queues with RSS */
		switch (wx->mac.type) {
		case wx_mac_sp:
		case wx_mac_aml:
			max_combined = 63;
			break;
		default:
			max_combined = 8;
			break;
		}
	}

	return max_combined;
}

void wx_get_channels(struct net_device *dev,
		     struct ethtool_channels *ch)
{
	struct wx *wx = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = wx_max_channels(wx);

	/* report info for other vector */
	if (wx->msix_q_entries) {
		ch->max_other = 1;
		ch->other_count = 1;
	}

	/* record RSS queues */
	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
}
EXPORT_SYMBOL(wx_get_channels);

int wx_set_channels(struct net_device *dev,
		    struct ethtool_channels *ch)
{
	unsigned int count = ch->combined_count;
	struct wx *wx = netdev_priv(dev);

	/* verify other_count has not changed */
	if (ch->other_count != 1)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > wx_max_channels(wx))
		return -EINVAL;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		wx->ring_feature[RING_F_FDIR].limit = count;

	wx->ring_feature[RING_F_RSS].limit = count;

	return 0;
}
EXPORT_SYMBOL(wx_set_channels);

u32 wx_get_msglevel(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return wx->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);

void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *wx = netdev_priv(netdev);

	wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);

int wx_get_ts_info(struct net_device *dev,
		   struct kernel_ethtool_ts_info *info)
{
	struct wx *wx = netdev_priv(dev);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (wx->ptp_clock)
		info->phc_index = ptp_clock_index(wx->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);

	return 0;
}
EXPORT_SYMBOL(wx_get_ts_info);

void wx_get_ptp_stats(struct net_device *dev,
		      struct ethtool_ts_stats *ts_stats)
{
	struct wx *wx = netdev_priv(dev);

	if (wx->ptp_clock) {
		ts_stats->pkts = wx->tx_hwtstamp_pkts;
		ts_stats->lost = wx->tx_hwtstamp_timeouts +
				 wx->tx_hwtstamp_skipped +
				 wx->rx_hwtstamp_cleared;
		ts_stats->err = wx->tx_hwtstamp_errors;
	}
}
EXPORT_SYMBOL(wx_get_ptp_stats);