1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
2 /*
3 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
4 * Copyright (c) 2014, Synopsys, Inc.
5 * All rights reserved
6 */
7
8 #include <linux/spinlock.h>
9 #include <linux/phy.h>
10 #include <linux/net_tstamp.h>
11
12 #include "xgbe.h"
13 #include "xgbe-common.h"
14
/* Describes one ethtool statistic: its display name plus the size and
 * byte offset of the backing 64-bit counter within struct xgbe_prv_data.
 */
struct xgbe_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	int stat_size;				/* sizeof() of the counter field */
	int stat_offset;			/* offsetof() within xgbe_prv_data */
};
20
/* Build an xgbe_stats entry for a hardware MMC (MAC Management Counter)
 * statistic stored in pdata->mmc_stats.
 */
#define XGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  sizeof_field(struct xgbe_mmc_stats, _var),		\
	  offsetof(struct xgbe_prv_data, mmc_stats._var),	\
	}
26
/* Build an xgbe_stats entry for a driver-maintained extended statistic
 * stored in pdata->ext_stats.
 */
#define XGMAC_EXT_STAT(_string, _var)				\
	{ _string,						\
	  sizeof_field(struct xgbe_ext_stats, _var),		\
	  offsetof(struct xgbe_prv_data, ext_stats._var),	\
	}
32
/* Table of device statistics exported via "ethtool -S"; each entry maps
 * a display name to a counter in struct xgbe_prv_data (either a hardware
 * MMC counter or a driver-maintained extended counter).
 */
static const struct xgbe_stats xgbe_gstring_stats[] = {
	/* Tx statistics */
	XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	XGMAC_MMC_STAT("tx_packets", txframecount_gb),
	XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets),
	XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
	XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	/* Rx statistics */
	XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets),
	XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors),
	XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors),
	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};

/* Number of entries in xgbe_gstring_stats */
#define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
81
/* ethtool .get_strings: emit the name of every self-test or statistic,
 * in the same order xgbe_get_ethtool_stats() emits the values.
 */
static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int idx;

	if (stringset == ETH_SS_TEST) {
		xgbe_selftest_get_strings(pdata, data);
		return;
	}

	if (stringset != ETH_SS_STATS)
		return;

	/* Fixed device statistics first, then per-queue counters */
	for (idx = 0; idx < XGBE_STATS_COUNT; idx++)
		ethtool_puts(&data, xgbe_gstring_stats[idx].stat_string);

	for (idx = 0; idx < pdata->tx_ring_count; idx++) {
		ethtool_sprintf(&data, "txq_%u_packets", idx);
		ethtool_sprintf(&data, "txq_%u_bytes", idx);
	}

	for (idx = 0; idx < pdata->rx_ring_count; idx++) {
		ethtool_sprintf(&data, "rxq_%u_packets", idx);
		ethtool_sprintf(&data, "rxq_%u_bytes", idx);
	}
}
108
/* ethtool .get_ethtool_stats: refresh the hardware MMC counters and copy
 * every statistic (fixed table first, then per-queue Tx/Rx counters) into
 * the user-visible u64 array.
 */
static void xgbe_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	const struct xgbe_stats *entry;
	int i;

	pdata->hw_if.read_mmc_stats(pdata);

	for (i = 0; i < XGBE_STATS_COUNT; i++) {
		entry = &xgbe_gstring_stats[i];
		/* Counters live at a fixed byte offset inside pdata */
		*data++ = *(u64 *)((u8 *)pdata + entry->stat_offset);
	}

	for (i = 0; i < pdata->tx_ring_count; i++) {
		*data++ = pdata->ext_stats.txq_packets[i];
		*data++ = pdata->ext_stats.txq_bytes[i];
	}

	for (i = 0; i < pdata->rx_ring_count; i++) {
		*data++ = pdata->ext_stats.rxq_packets[i];
		*data++ = pdata->ext_stats.rxq_bytes[i];
	}
}
130
xgbe_get_sset_count(struct net_device * netdev,int stringset)131 static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
132 {
133 struct xgbe_prv_data *pdata = netdev_priv(netdev);
134 int ret;
135
136 switch (stringset) {
137 case ETH_SS_TEST:
138 ret = xgbe_selftest_get_count(pdata);
139 break;
140 case ETH_SS_STATS:
141 ret = XGBE_STATS_COUNT +
142 (pdata->tx_ring_count * 2) +
143 (pdata->rx_ring_count * 2);
144 break;
145
146 default:
147 ret = -EOPNOTSUPP;
148 }
149
150 return ret;
151 }
152
xgbe_get_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)153 static void xgbe_get_pauseparam(struct net_device *netdev,
154 struct ethtool_pauseparam *pause)
155 {
156 struct xgbe_prv_data *pdata = netdev_priv(netdev);
157
158 pause->autoneg = pdata->phy.pause_autoneg;
159 pause->tx_pause = pdata->phy.tx_pause;
160 pause->rx_pause = pdata->phy.rx_pause;
161 }
162
xgbe_set_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)163 static int xgbe_set_pauseparam(struct net_device *netdev,
164 struct ethtool_pauseparam *pause)
165 {
166 struct xgbe_prv_data *pdata = netdev_priv(netdev);
167 struct ethtool_link_ksettings *lks = &pdata->phy.lks;
168 int ret = 0;
169
170 if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
171 netdev_err(netdev,
172 "autoneg disabled, pause autoneg not available\n");
173 return -EINVAL;
174 }
175
176 pdata->phy.pause_autoneg = pause->autoneg;
177 pdata->phy.tx_pause = pause->tx_pause;
178 pdata->phy.rx_pause = pause->rx_pause;
179
180 XGBE_CLR_ADV(lks, Pause);
181 XGBE_CLR_ADV(lks, Asym_Pause);
182
183 if (pause->rx_pause) {
184 XGBE_SET_ADV(lks, Pause);
185 XGBE_SET_ADV(lks, Asym_Pause);
186 }
187
188 if (pause->tx_pause) {
189 /* Equivalent to XOR of Asym_Pause */
190 if (XGBE_ADV(lks, Asym_Pause))
191 XGBE_CLR_ADV(lks, Asym_Pause);
192 else
193 XGBE_SET_ADV(lks, Asym_Pause);
194 }
195
196 if (netif_running(netdev))
197 ret = pdata->phy_if.phy_config_aneg(pdata);
198
199 return ret;
200 }
201
xgbe_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)202 static int xgbe_get_link_ksettings(struct net_device *netdev,
203 struct ethtool_link_ksettings *cmd)
204 {
205 struct xgbe_prv_data *pdata = netdev_priv(netdev);
206 struct ethtool_link_ksettings *lks = &pdata->phy.lks;
207
208 cmd->base.phy_address = pdata->phy.address;
209
210 if (netif_carrier_ok(netdev)) {
211 cmd->base.speed = pdata->phy.speed;
212 cmd->base.duplex = pdata->phy.duplex;
213 } else {
214 cmd->base.speed = SPEED_UNKNOWN;
215 cmd->base.duplex = DUPLEX_UNKNOWN;
216 }
217
218 cmd->base.autoneg = pdata->phy.autoneg;
219 cmd->base.port = PORT_NONE;
220
221 XGBE_LM_COPY(cmd, supported, lks, supported);
222 XGBE_LM_COPY(cmd, advertising, lks, advertising);
223 XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising);
224
225 return 0;
226 }
227
/* ethtool .set_link_ksettings: validate and apply the requested PHY
 * address, autoneg mode, fixed speed/duplex and advertised link modes,
 * then restart auto-negotiation if the interface is running.
 *
 * Returns 0 on success, -EINVAL for invalid input, or the result of
 * phy_config_aneg().
 */
static int xgbe_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	u32 speed;
	int ret;

	speed = cmd->base.speed;

	if (cmd->base.phy_address != pdata->phy.address) {
		netdev_err(netdev, "invalid phy address %hhu\n",
			   cmd->base.phy_address);
		return -EINVAL;
	}

	if ((cmd->base.autoneg != AUTONEG_ENABLE) &&
	    (cmd->base.autoneg != AUTONEG_DISABLE)) {
		netdev_err(netdev, "unsupported autoneg %hhu\n",
			   cmd->base.autoneg);
		return -EINVAL;
	}

	/* With autoneg off the requested fixed speed must be one the PHY
	 * can actually do, and only full duplex is supported.
	 */
	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
			netdev_err(netdev, "unsupported speed %u\n", speed);
			return -EINVAL;
		}

		if (cmd->base.duplex != DUPLEX_FULL) {
			netdev_err(netdev, "unsupported duplex %hhu\n",
				   cmd->base.duplex);
			return -EINVAL;
		}
	}

	netif_dbg(pdata, link, netdev,
		  "requested advertisement 0x%*pb, phy supported 0x%*pb\n",
		  __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
		  __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);

	/* Restrict the requested advertisement to supported modes */
	linkmode_and(advertising, cmd->link_modes.advertising,
		     lks->link_modes.supported);

	/* Autoneg with nothing left to advertise cannot succeed */
	if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
	    bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
		netdev_err(netdev,
			   "unsupported requested advertisement\n");
		return -EINVAL;
	}

	ret = 0;
	pdata->phy.autoneg = cmd->base.autoneg;
	pdata->phy.speed = speed;
	pdata->phy.duplex = cmd->base.duplex;
	linkmode_copy(lks->link_modes.advertising, advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE)
		XGBE_SET_ADV(lks, Autoneg);
	else
		XGBE_CLR_ADV(lks, Autoneg);

	if (netif_running(netdev))
		ret = pdata->phy_if.phy_config_aneg(pdata);

	return ret;
}
296
xgbe_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * drvinfo)297 static void xgbe_get_drvinfo(struct net_device *netdev,
298 struct ethtool_drvinfo *drvinfo)
299 {
300 struct xgbe_prv_data *pdata = netdev_priv(netdev);
301 struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
302
303 strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
304 strscpy(drvinfo->bus_info, dev_name(pdata->dev),
305 sizeof(drvinfo->bus_info));
306 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
307 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
308 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
309 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
310 }
311
/* ethtool .get_msglevel: return the driver's netif message level bitmap. */
static u32 xgbe_get_msglevel(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->msg_enable;
}
318
/* ethtool .set_msglevel: store a new netif message level bitmap. */
static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->msg_enable = msglevel;
}
325
xgbe_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)326 static int xgbe_get_coalesce(struct net_device *netdev,
327 struct ethtool_coalesce *ec,
328 struct kernel_ethtool_coalesce *kernel_coal,
329 struct netlink_ext_ack *extack)
330 {
331 struct xgbe_prv_data *pdata = netdev_priv(netdev);
332
333 memset(ec, 0, sizeof(struct ethtool_coalesce));
334
335 ec->rx_coalesce_usecs = pdata->rx_usecs;
336 ec->rx_max_coalesced_frames = pdata->rx_frames;
337
338 ec->tx_coalesce_usecs = pdata->tx_usecs;
339 ec->tx_max_coalesced_frames = pdata->tx_frames;
340
341 return 0;
342 }
343
/* ethtool .set_coalesce: validate the requested Rx/Tx interrupt
 * coalescing parameters, round tx-usecs to the jiffy granularity, and
 * program the hardware.
 *
 * Returns 0 on success or -EINVAL when a value is out of range.
 */
static int xgbe_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rx_frames, rx_riwt, rx_usecs;
	unsigned int tx_frames, tx_usecs;
	unsigned int jiffy_us = jiffies_to_usecs(1);	/* one jiffy in usecs */

	/* Convert the requested Rx usecs into hardware RIWT units */
	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
	rx_usecs = ec->rx_coalesce_usecs;
	rx_frames = ec->rx_max_coalesced_frames;

	/* Use smallest possible value if conversion resulted in zero */
	if (rx_usecs && !rx_riwt)
		rx_riwt = 1;

	/* Check the bounds of values for Rx */
	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "rx-usec is limited to %d usecs",
				       hw_if->riwt_to_usec(pdata,
							   XGMAC_MAX_DMA_RIWT));
		return -EINVAL;
	}
	if (rx_frames > pdata->rx_desc_count) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "rx-frames is limited to %d frames",
				       pdata->rx_desc_count);
		return -EINVAL;
	}

	tx_usecs = ec->tx_coalesce_usecs;
	tx_frames = ec->tx_max_coalesced_frames;

	/* Check the bounds of values for Tx */
	if (!tx_usecs) {
		NL_SET_ERR_MSG_MOD(extack, "tx-usecs must not be 0");
		return -EINVAL;
	}
	if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
				       XGMAC_MAX_COAL_TX_TICK);
		return -EINVAL;
	}
	if (tx_frames > pdata->tx_desc_count) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "tx-frames is limited to %d frames",
				       pdata->tx_desc_count);
		return -EINVAL;
	}

	/* Round tx-usecs to nearest multiple of jiffy granularity */
	if (tx_usecs % jiffy_us) {
		tx_usecs = rounddown(tx_usecs, jiffy_us);
		if (!tx_usecs)
			tx_usecs = jiffy_us;
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
				       tx_usecs, jiffy_us);
	}

	/* All inputs validated; commit and program the hardware */
	pdata->rx_riwt = rx_riwt;
	pdata->rx_usecs = rx_usecs;
	pdata->rx_frames = rx_frames;
	hw_if->config_rx_coalesce(pdata);

	pdata->tx_usecs = tx_usecs;
	pdata->tx_frames = tx_frames;
	hw_if->config_tx_coalesce(pdata);

	return 0;
}
419
/* ethtool .get_rx_ring_count: return the number of active Rx rings. */
static u32 xgbe_get_rx_ring_count(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->rx_ring_count;
}
426
/* ethtool .get_rxfh_key_size: size in bytes of the RSS hash key. */
static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return sizeof(pdata->rss_key);
}
433
/* ethtool .get_rxfh_indir_size: number of RSS indirection table entries. */
static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return ARRAY_SIZE(pdata->rss_table);
}
440
xgbe_get_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh)441 static int xgbe_get_rxfh(struct net_device *netdev,
442 struct ethtool_rxfh_param *rxfh)
443 {
444 struct xgbe_prv_data *pdata = netdev_priv(netdev);
445 unsigned int i;
446
447 if (rxfh->indir) {
448 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
449 rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
450 MAC_RSSDR, DMCH);
451 }
452
453 if (rxfh->key)
454 memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));
455
456 rxfh->hfunc = ETH_RSS_HASH_TOP;
457
458 return 0;
459 }
460
xgbe_set_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)461 static int xgbe_set_rxfh(struct net_device *netdev,
462 struct ethtool_rxfh_param *rxfh,
463 struct netlink_ext_ack *extack)
464 {
465 struct xgbe_prv_data *pdata = netdev_priv(netdev);
466 struct xgbe_hw_if *hw_if = &pdata->hw_if;
467 int ret;
468
469 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
470 rxfh->hfunc != ETH_RSS_HASH_TOP) {
471 NL_SET_ERR_MSG_MOD(extack, "unsupported hash function");
472 return -EOPNOTSUPP;
473 }
474
475 if (rxfh->indir) {
476 ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
477 if (ret)
478 return ret;
479 }
480
481 if (rxfh->key) {
482 ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
483 if (ret)
484 return ret;
485 }
486
487 return 0;
488 }
489
/* ethtool .get_ts_info: report timestamping capabilities and, when a PTP
 * clock has been registered, its clock index (otherwise phc_index keeps
 * the core's default value).
 */
static int xgbe_get_ts_info(struct net_device *netdev,
			    struct kernel_ethtool_ts_info *ts_info)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;

	if (pdata->ptp_clock)
		ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);

	/* Tx timestamping can be switched on/off; Rx supports the full
	 * range of PTP v1/v2 filters as well as all-packet timestamping
	 */
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}
518
/* ethtool .get_module_info: delegate SFP module info to the PHY layer. */
static int xgbe_get_module_info(struct net_device *netdev,
				struct ethtool_modinfo *modinfo)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->phy_if.module_info(pdata, modinfo);
}
526
/* ethtool .get_module_eeprom: delegate SFP EEPROM reads to the PHY layer. */
static int xgbe_get_module_eeprom(struct net_device *netdev,
				  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
534
535 static void
xgbe_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ringparam,struct kernel_ethtool_ringparam * kernel_ringparam,struct netlink_ext_ack * extack)536 xgbe_get_ringparam(struct net_device *netdev,
537 struct ethtool_ringparam *ringparam,
538 struct kernel_ethtool_ringparam *kernel_ringparam,
539 struct netlink_ext_ack *extack)
540 {
541 struct xgbe_prv_data *pdata = netdev_priv(netdev);
542
543 ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
544 ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
545 ringparam->rx_pending = pdata->rx_desc_count;
546 ringparam->tx_pending = pdata->tx_desc_count;
547 }
548
xgbe_set_ringparam(struct net_device * netdev,struct ethtool_ringparam * ringparam,struct kernel_ethtool_ringparam * kernel_ringparam,struct netlink_ext_ack * extack)549 static int xgbe_set_ringparam(struct net_device *netdev,
550 struct ethtool_ringparam *ringparam,
551 struct kernel_ethtool_ringparam *kernel_ringparam,
552 struct netlink_ext_ack *extack)
553 {
554 struct xgbe_prv_data *pdata = netdev_priv(netdev);
555 unsigned int rx, tx;
556
557 if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
558 NL_SET_ERR_MSG_MOD(extack, "unsupported ring parameter");
559 return -EINVAL;
560 }
561
562 if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
563 (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
564 NL_SET_ERR_MSG_FMT_MOD(extack,
565 "rx ring parameter must be between %u and %u",
566 XGBE_RX_DESC_CNT_MIN,
567 XGBE_RX_DESC_CNT_MAX);
568 return -EINVAL;
569 }
570
571 if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
572 (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
573 NL_SET_ERR_MSG_FMT_MOD(extack,
574 "tx ring parameter must be between %u and %u",
575 XGBE_TX_DESC_CNT_MIN,
576 XGBE_TX_DESC_CNT_MAX);
577 return -EINVAL;
578 }
579
580 rx = __rounddown_pow_of_two(ringparam->rx_pending);
581 if (rx != ringparam->rx_pending)
582 NL_SET_ERR_MSG_FMT_MOD(extack,
583 "rx ring parameter rounded to power of two: %u",
584 rx);
585
586 tx = __rounddown_pow_of_two(ringparam->tx_pending);
587 if (tx != ringparam->tx_pending)
588 NL_SET_ERR_MSG_FMT_MOD(extack,
589 "tx ring parameter rounded to power of two: %u",
590 tx);
591
592 if ((rx == pdata->rx_desc_count) &&
593 (tx == pdata->tx_desc_count))
594 goto out;
595
596 pdata->rx_desc_count = rx;
597 pdata->tx_desc_count = tx;
598
599 xgbe_restart_dev(pdata);
600
601 out:
602 return 0;
603 }
604
/* ethtool .get_channels: report maximum and current channel counts.
 *
 * Maximums use ethtool's model in which "combined" channels are counted
 * separately from Rx-only/Tx-only ones; since at least one combined
 * channel is always required (enforced in xgbe_set_channels()), the
 * dedicated Rx/Tx maximums are one less than the per-direction limits.
 */
static void xgbe_get_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int rx, tx, combined;

	/* Calculate maximums allowed:
	 *   - Take into account the number of available IRQs
	 *   - Do not take into account the number of online CPUs so that
	 *     the user can over-subscribe if desired
	 *   - Tx is additionally limited by the number of hardware queues
	 */
	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
	rx = min(rx, pdata->channel_irq_count);
	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
	tx = min(tx, pdata->channel_irq_count);
	tx = min(tx, pdata->tx_max_q_count);

	combined = min(rx, tx);

	channels->max_combined = combined;
	channels->max_rx = rx ? rx - 1 : 0;
	channels->max_tx = tx ? tx - 1 : 0;

	/* Get current settings based on device state; a pending channel
	 * change (new_*_ring_count) takes precedence over the active counts
	 */
	rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
	tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;

	combined = min(rx, tx);
	rx -= combined;
	tx -= combined;

	channels->combined_count = combined;
	channels->rx_count = rx;
	channels->tx_count = tx;
}
641
/* Log the channel counts the user supplied, to accompany an error message
 * explaining why a channel configuration request was rejected.
 */
static void xgbe_print_set_channels_input(struct net_device *netdev,
					  struct ethtool_channels *channels)
{
	netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
		   channels->combined_count, channels->rx_count,
		   channels->tx_count);
}
649
/* ethtool .set_channels: validate a requested channel configuration
 * (at least one combined channel, Rx-only or Tx-only extras but not both,
 * totals within hardware/IRQ limits) and trigger a full device restart
 * when the ring counts actually change.
 *
 * Returns 0 on success or -EINVAL for an invalid request.
 */
static int xgbe_set_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int rx, rx_curr, tx, tx_curr, combined;

	/* Calculate maximums allowed:
	 *   - Take into account the number of available IRQs
	 *   - Do not take into account the number of online CPUs so that
	 *     the user can over-subscribe if desired
	 *   - Tx is additionally limited by the number of hardware queues
	 */
	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
	rx = min(rx, pdata->channel_irq_count);
	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
	tx = min(tx, pdata->tx_max_q_count);
	tx = min(tx, pdata->channel_irq_count);

	combined = min(rx, tx);

	/* Should not be setting other count */
	if (channels->other_count) {
		netdev_err(netdev,
			   "other channel count must be zero\n");
		return -EINVAL;
	}

	/* Require at least one Combined (Rx and Tx) channel */
	if (!channels->combined_count) {
		netdev_err(netdev,
			   "at least one combined Rx/Tx channel is required\n");
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Check combined channels */
	if (channels->combined_count > combined) {
		netdev_err(netdev,
			   "combined channel count cannot exceed %u\n",
			   combined);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Can have some Rx-only or Tx-only channels, but not both */
	if (channels->rx_count && channels->tx_count) {
		netdev_err(netdev,
			   "cannot specify both Rx-only and Tx-only channels\n");
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Check that we don't exceed the maximum number of channels */
	if ((channels->combined_count + channels->rx_count) > rx) {
		netdev_err(netdev,
			   "total Rx channels (%u) requested exceeds maximum available (%u)\n",
			   channels->combined_count + channels->rx_count, rx);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	if ((channels->combined_count + channels->tx_count) > tx) {
		netdev_err(netdev,
			   "total Tx channels (%u) requested exceeds maximum available (%u)\n",
			   channels->combined_count + channels->tx_count, tx);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	rx = channels->combined_count + channels->rx_count;
	tx = channels->combined_count + channels->tx_count;

	/* Compare against any pending channel change first */
	rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
	tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;

	if ((rx == rx_curr) && (tx == tx_curr))
		goto out;

	pdata->new_rx_ring_count = rx;
	pdata->new_tx_ring_count = tx;

	xgbe_full_restart_dev(pdata);

out:
	return 0;
}
736
/* ethtool operations supported by the xgbe driver */
static const struct ethtool_ops xgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = xgbe_get_drvinfo,
	.get_msglevel = xgbe_get_msglevel,
	.set_msglevel = xgbe_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_coalesce = xgbe_get_coalesce,
	.set_coalesce = xgbe_set_coalesce,
	.get_pauseparam = xgbe_get_pauseparam,
	.set_pauseparam = xgbe_set_pauseparam,
	.get_strings = xgbe_get_strings,
	.get_ethtool_stats = xgbe_get_ethtool_stats,
	.get_sset_count = xgbe_get_sset_count,
	.get_rx_ring_count = xgbe_get_rx_ring_count,
	.get_rxfh_key_size = xgbe_get_rxfh_key_size,
	.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
	.get_rxfh = xgbe_get_rxfh,
	.set_rxfh = xgbe_set_rxfh,
	.get_ts_info = xgbe_get_ts_info,
	.get_link_ksettings = xgbe_get_link_ksettings,
	.set_link_ksettings = xgbe_set_link_ksettings,
	.get_module_info = xgbe_get_module_info,
	.get_module_eeprom = xgbe_get_module_eeprom,
	.get_ringparam = xgbe_get_ringparam,
	.set_ringparam = xgbe_set_ringparam,
	.get_channels = xgbe_get_channels,
	.set_channels = xgbe_set_channels,
	.self_test = xgbe_selftest_run,
};
767
/* Return the ethtool operations table for assignment to a net_device. */
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
{
	return &xgbe_ethtool_ops;
}
772