// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sfp.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_ethtool.h"
#include "ionic_stats.h"

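/* Note: rx_copybreak is stored in a 16-bit lif field (see the cast in
 * ionic_set_tunable() below), so the user-settable limit is capped at
 * U16_MAX as well as at IONIC_MAX_BUF_LEN.
 */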
#define IONIC_MAX_RX_COPYBREAK	min(U16_MAX, IONIC_MAX_BUF_LEN)

static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
	u32 i;

	for (i = 0; i < ionic_num_stats_grps; i++)
		ionic_stats_groups[i].get_strings(lif, &buf);
}

static void ionic_get_stats(struct net_device *netdev,
			    struct ethtool_stats *stats, u64 *buf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	u32 i;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	memset(buf, 0, stats->n_stats * sizeof(*buf));
	for (i = 0; i < ionic_num_stats_grps; i++)
		ionic_stats_groups[i].get_values(lif, &buf);
}

static int ionic_get_stats_count(struct ionic_lif *lif)
{
	int i, num_stats = 0;

	for (i = 0; i < ionic_num_stats_grps; i++)
		num_stats += ionic_stats_groups[i].get_count(lif);

	return num_stats;
}

static int ionic_get_sset_count(struct net_device *netdev, int sset)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int count = 0;

	switch (sset) {
	case ETH_SS_STATS:
		count = ionic_get_stats_count(lif);
		break;
	}
	return count;
}

static void ionic_get_strings(struct net_device *netdev,
			      u32 sset, u8 *buf)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		ionic_get_stats_strings(lif, buf);
		break;
	}
}

static void ionic_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;

	strscpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
		sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, ionic_bus_info(ionic),
		sizeof(drvinfo->bus_info));
}

static int ionic_get_regs_len(struct net_device *netdev)
{
	return (IONIC_DEV_INFO_REG_COUNT + IONIC_DEV_CMD_REG_COUNT) * sizeof(u32);
}

static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			   void *p)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_dev *idev;
	unsigned int offset;
	unsigned int size;

	regs->version = IONIC_DEV_CMD_REG_VERSION;

	idev = &lif->ionic->idev;
	if (!idev->dev_info_regs)
		return;

	offset = 0;
	size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
	memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);

	offset += size;
	size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
	memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}

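/* Only the PF fills in link_down_events; on a VF the field is left
 * untouched so the ethtool core treats the statistic as unreported.
 */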
static void ionic_get_link_ext_stats(struct net_device *netdev,
				     struct ethtool_link_ext_stats *stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (lif->ionic->pdev->is_physfn)
		stats->link_down_events = lif->link_down_count;
}

static int ionic_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *ks)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_dev *idev = &lif->ionic->idev;
	int copper_seen = 0;

	ethtool_link_ksettings_zero_link_mode(ks, supported);

	if (!idev->port_info) {
		netdev_err(netdev, "port_info not initialized\n");
		return -EOPNOTSUPP;
	}

	/* The port_info data is found in a DMA space that the NIC keeps
	 * up-to-date, so there's no need to request the data from the
	 * NIC; we already have it in our memory space.
	 */

	switch (le16_to_cpu(idev->port_info->status.xcvr.pid)) {
	/* Copper */
	case IONIC_XCVR_PID_QSFP_100G_CR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseCR4_Full);
		copper_seen++;
		break;
	case IONIC_XCVR_PID_QSFP_40GBASE_CR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		copper_seen++;
		break;
	case IONIC_XCVR_PID_SFP_25GBASE_CR_S:
	case IONIC_XCVR_PID_SFP_25GBASE_CR_L:
	case IONIC_XCVR_PID_SFP_25GBASE_CR_N:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		copper_seen++;
		break;
	case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
	case IONIC_XCVR_PID_QSFP_50G_CR2:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseCR2_Full);
		copper_seen++;
		break;
	case IONIC_XCVR_PID_QSFP_200G_CR4:
		ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
		copper_seen++;
		break;
	case IONIC_XCVR_PID_QSFP_400G_CR4:
		ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
		copper_seen++;
		break;
	case IONIC_XCVR_PID_SFP_10GBASE_AOC:
	case IONIC_XCVR_PID_SFP_10GBASE_CU:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseCR_Full);
		copper_seen++;
		break;

	/* Fibre */
	case IONIC_XCVR_PID_QSFP_100G_SR4:
	case IONIC_XCVR_PID_QSFP_100G_AOC:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseSR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_100G_CWDM4:
	case IONIC_XCVR_PID_QSFP_100G_PSM4:
	case IONIC_XCVR_PID_QSFP_100G_LR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseLR4_ER4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_100G_ER4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseLR4_ER4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_40GBASE_SR4:
	case IONIC_XCVR_PID_QSFP_40GBASE_AOC:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseSR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_40GBASE_LR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseLR4_Full);
		break;
	case IONIC_XCVR_PID_SFP_25GBASE_SR:
	case IONIC_XCVR_PID_SFP_25GBASE_AOC:
	case IONIC_XCVR_PID_SFP_25GBASE_ACC:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseSR_Full);
		break;
	case IONIC_XCVR_PID_QSFP_200G_AOC:
	case IONIC_XCVR_PID_QSFP_200G_SR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     200000baseSR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_200G_FR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     200000baseLR4_ER4_FR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_200G_DR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     200000baseDR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_400G_FR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     400000baseLR4_ER4_FR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_400G_DR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     400000baseDR4_Full);
		break;
	case IONIC_XCVR_PID_QSFP_400G_SR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     400000baseSR4_Full);
		break;
	case IONIC_XCVR_PID_SFP_10GBASE_SR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseSR_Full);
		break;
	case IONIC_XCVR_PID_SFP_10GBASE_LR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseLR_Full);
		break;
	case IONIC_XCVR_PID_SFP_10GBASE_LRM:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseLRM_Full);
		break;
	case IONIC_XCVR_PID_SFP_10GBASE_ER:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseER_Full);
		break;
	case IONIC_XCVR_PID_SFP_10GBASE_T:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		break;
	case IONIC_XCVR_PID_SFP_1000BASE_T:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		break;
	case IONIC_XCVR_PID_UNKNOWN:
		/* This means there's no module plugged in */
		break;
	default:
		dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n",
			 idev->port_info->status.xcvr.pid,
			 idev->port_info->status.xcvr.pid);
		break;
	}

	linkmode_copy(ks->link_modes.advertising, ks->link_modes.supported);

	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
	if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_FC)
		ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER);
	else if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_RS)
		ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);

	ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_COPPER ||
	    copper_seen)
		ks->base.port = PORT_DA;
	else if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_FIBER)
		ks->base.port = PORT_FIBRE;
	else
		ks->base.port = PORT_NONE;

	if (ks->base.port != PORT_NONE) {
		ks->base.speed = le32_to_cpu(lif->info->status.link_speed);

		if (le16_to_cpu(lif->info->status.link_status))
			ks->base.duplex = DUPLEX_FULL;
		else
			ks->base.duplex = DUPLEX_UNKNOWN;

		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);

		if (idev->port_info->config.an_enable) {
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     Autoneg);
			ks->base.autoneg = AUTONEG_ENABLE;
		}
	}

	return 0;
}

static int ionic_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *ks)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_dev *idev = &lif->ionic->idev;
	struct ionic *ionic = lif->ionic;
	int err = 0;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	/* set autoneg */
	if (ks->base.autoneg != idev->port_info->config.an_enable) {
		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_port_autoneg(idev, ks->base.autoneg);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		mutex_unlock(&ionic->dev_cmd_lock);
		if (err)
			return err;
	}

	/* set speed */
	if (ks->base.speed != le32_to_cpu(idev->port_info->config.speed)) {
		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_port_speed(idev, ks->base.speed);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		mutex_unlock(&ionic->dev_cmd_lock);
		if (err)
			return err;
	}

	return 0;
}

static void ionic_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	u8 pause_type;

	pause->autoneg = 0;

	pause_type = lif->ionic->idev.port_info->config.pause_type;
	if (pause_type) {
		pause->rx_pause = (pause_type & IONIC_PAUSE_F_RX) ? 1 : 0;
		pause->tx_pause = (pause_type & IONIC_PAUSE_F_TX) ? 1 : 0;
	}
}

static int ionic_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u32 requested_pause;
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	if (pause->autoneg)
		return -EOPNOTSUPP;

	/* change both at the same time */
	requested_pause = IONIC_PORT_PAUSE_TYPE_LINK;
	if (pause->rx_pause)
		requested_pause |= IONIC_PAUSE_F_RX;
	if (pause->tx_pause)
		requested_pause |= IONIC_PAUSE_F_TX;

	if (requested_pause == lif->ionic->idev.port_info->config.pause_type)
		return 0;

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_port_pause(&lif->ionic->idev, requested_pause);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	return 0;
}

static int ionic_get_fecparam(struct net_device *netdev,
			      struct ethtool_fecparam *fec)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	switch (lif->ionic->idev.port_info->config.fec_type) {
	case IONIC_PORT_FEC_TYPE_NONE:
		fec->active_fec = ETHTOOL_FEC_OFF;
		break;
	case IONIC_PORT_FEC_TYPE_RS:
		fec->active_fec = ETHTOOL_FEC_RS;
		break;
	case IONIC_PORT_FEC_TYPE_FC:
		fec->active_fec = ETHTOOL_FEC_BASER;
		break;
	}

	fec->fec = ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;

	return 0;
}

static int ionic_set_fecparam(struct net_device *netdev,
			      struct ethtool_fecparam *fec)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	u8 fec_type;
	int ret = 0;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	if (lif->ionic->idev.port_info->config.an_enable) {
		netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n");
		return -EINVAL;
	}

	switch (fec->fec) {
	case ETHTOOL_FEC_NONE:
		fec_type = IONIC_PORT_FEC_TYPE_NONE;
		break;
	case ETHTOOL_FEC_OFF:
		fec_type = IONIC_PORT_FEC_TYPE_NONE;
		break;
	case ETHTOOL_FEC_RS:
		fec_type = IONIC_PORT_FEC_TYPE_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec_type = IONIC_PORT_FEC_TYPE_FC;
		break;
	case ETHTOOL_FEC_AUTO:
	default:
		netdev_err(netdev, "FEC request 0x%04x not supported\n",
			   fec->fec);
		return -EINVAL;
	}

	if (fec_type != lif->ionic->idev.port_info->config.fec_type) {
		mutex_lock(&lif->ionic->dev_cmd_lock);
		ionic_dev_cmd_port_fec(&lif->ionic->idev, fec_type);
		ret = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
		mutex_unlock(&lif->ionic->dev_cmd_lock);
	}

	return ret;
}

static int ionic_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs;
	coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		coalesce->use_adaptive_tx_coalesce = test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
	else
		coalesce->use_adaptive_tx_coalesce = 0;

	coalesce->use_adaptive_rx_coalesce = test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);

	return 0;
}

static int ionic_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_identity *ident;
	u32 rx_coal, rx_dim;
	u32 tx_coal, tx_dim;
	unsigned int i;

	ident = &lif->ionic->ident;
	if (ident->dev.intr_coal_div == 0) {
		netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n",
			    ident->dev.intr_coal_div);
		return -EIO;
	}

	/* Tx normally shares Rx interrupt, so only change Rx if not split */
	if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) &&
	    (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs ||
	     coalesce->use_adaptive_tx_coalesce)) {
		netdev_warn(netdev, "only rx parameters can be changed\n");
		return -EINVAL;
	}

	/* Convert the usec request to a HW usable value.  If they asked
	 * for non-zero and it resolved to zero, bump it up
	 */
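	/* (ionic_coal_usec_to_hw() scales the request using the coalesce
	 * multiplier/divider values from the device identity, so the
	 * effective granularity is device-specific.)
	 */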
	rx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
	if (!rx_coal && coalesce->rx_coalesce_usecs)
		rx_coal = 1;
	tx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->tx_coalesce_usecs);
	if (!tx_coal && coalesce->tx_coalesce_usecs)
		tx_coal = 1;

	if (rx_coal > IONIC_INTR_CTRL_COAL_MAX ||
	    tx_coal > IONIC_INTR_CTRL_COAL_MAX)
		return -ERANGE;

	/* Save the new values */
	lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
	lif->rx_coalesce_hw = rx_coal;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs;
	else
		lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs;
	lif->tx_coalesce_hw = tx_coal;

	if (coalesce->use_adaptive_rx_coalesce) {
		set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
		rx_dim = rx_coal;
	} else {
		clear_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
		rx_dim = 0;
	}

	if (coalesce->use_adaptive_tx_coalesce) {
		set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
		tx_dim = tx_coal;
	} else {
		clear_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
		tx_dim = 0;
	}

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		for (i = 0; i < lif->nxqs; i++) {
			if (lif->rxqcqs[i]->flags & IONIC_QCQ_F_INTR) {
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->rxqcqs[i]->intr.index,
						     lif->rx_coalesce_hw);
				lif->rxqcqs[i]->intr.dim_coal_hw = rx_dim;
			}

			if (lif->txqcqs[i]->flags & IONIC_QCQ_F_INTR) {
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				lif->txqcqs[i]->intr.dim_coal_hw = tx_dim;
			}
		}
	}

	return 0;
}

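/* Returns the number of CMB pages the requested config would use,
 * or a negative errno if the request can't be supported; callers
 * treat any non-negative value as success.
 */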
static int ionic_validate_cmb_config(struct ionic_lif *lif,
				     struct ionic_queue_params *qparam)
{
	int pages_have, pages_required = 0;
	unsigned long sz;

	if (!lif->ionic->idev.cmb_inuse &&
	    (qparam->cmb_tx || qparam->cmb_rx)) {
		netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
		return -EOPNOTSUPP;
	}

	if (qparam->cmb_tx) {
		if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
			netdev_info(lif->netdev,
				    "CMB rings for tx-push are not supported on this device\n");
			return -EOPNOTSUPP;
		}

		sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
		pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
	}

	if (qparam->cmb_rx) {
		if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
			netdev_info(lif->netdev,
				    "CMB rings for rx-push are not supported on this device\n");
			return -EOPNOTSUPP;
		}

		sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
		pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
	}

	pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
	if (pages_required > pages_have) {
		netdev_info(lif->netdev,
			    "Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d",
			    pages_required, pages_have);
		return -ENOMEM;
	}

	return pages_required;
}

static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
{
	struct ionic_queue_params qparam;
	int pages_used;

	if (netif_running(lif->netdev)) {
		netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
		return -EBUSY;
	}

	ionic_init_queue_params(lif, &qparam);
	qparam.cmb_tx = cmb_tx;
	qparam.cmb_rx = cmb_rx;
	pages_used = ionic_validate_cmb_config(lif, &qparam);
	if (pages_used < 0)
		return pages_used;

	if (cmb_tx)
		set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
	else
		clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);

	if (cmb_rx)
		set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
	else
		clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);

	if (cmb_tx || cmb_rx)
		netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
			    cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages_used);
	else
		netdev_info(lif->netdev, "Disabling CMB rings\n");

	return 0;
}

static void ionic_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	ring->tx_max_pending = IONIC_MAX_TX_DESC;
	ring->tx_pending = lif->ntxq_descs;
	ring->rx_max_pending = IONIC_MAX_RX_DESC;
	ring->rx_pending = lif->nrxq_descs;
	kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
	kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}

static int ionic_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue_params qparam;
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	ionic_init_queue_params(lif, &qparam);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
		return -EINVAL;
	}

	if (!is_power_of_2(ring->tx_pending) ||
	    !is_power_of_2(ring->rx_pending)) {
		netdev_info(netdev, "Descriptor count must be a power of 2\n");
		return -EINVAL;
	}

	/* if nothing to do return success */
	if (ring->tx_pending == lif->ntxq_descs &&
	    ring->rx_pending == lif->nrxq_descs &&
	    kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
	    kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
		return 0;

	qparam.ntxq_descs = ring->tx_pending;
	qparam.nrxq_descs = ring->rx_pending;
	qparam.cmb_tx = kernel_ring->tx_push;
	qparam.cmb_rx = kernel_ring->rx_push;

	err = ionic_validate_cmb_config(lif, &qparam);
	if (err < 0)
		return err;

	if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
	    kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
		err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
					     kernel_ring->rx_push);
		if (err < 0)
			return err;
	}

	if (ring->tx_pending != lif->ntxq_descs)
		netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
			    lif->ntxq_descs, ring->tx_pending);

	if (ring->rx_pending != lif->nrxq_descs)
		netdev_info(netdev, "Changing Rx ring size from %d to %d\n",
			    lif->nrxq_descs, ring->rx_pending);

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->ntxq_descs = ring->tx_pending;
		lif->nrxq_descs = ring->rx_pending;
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	err = ionic_reconfigure_queues(lif, &qparam);
	mutex_unlock(&lif->queue_lock);
	if (err)
		netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);

	return err;
}

static void ionic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *ch)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	/* report maximum channels */
	ch->max_combined = lif->ionic->ntxqs_per_lif;
	ch->max_rx = lif->ionic->ntxqs_per_lif / 2;
	ch->max_tx = lif->ionic->ntxqs_per_lif / 2;

	/* report current channels */
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
		ch->rx_count = lif->nxqs;
		ch->tx_count = lif->nxqs;
	} else {
		ch->combined_count = lif->nxqs;
	}
}

static int ionic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue_params qparam;
	int max_cnt;
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	ionic_init_queue_params(lif, &qparam);

	if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) {
		netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n");
		return -EOPNOTSUPP;
	}

	if (ch->rx_count != ch->tx_count) {
		netdev_info(netdev, "The rx and tx count must be equal\n");
		return -EINVAL;
	}

	if (ch->combined_count && ch->rx_count) {
		netdev_info(netdev, "Use either combined or rx and tx, not both\n");
		return -EINVAL;
	}

	max_cnt = lif->ionic->ntxqs_per_lif;
	if (ch->combined_count) {
		if (ch->combined_count > max_cnt)
			return -EINVAL;

		if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			netdev_info(lif->netdev, "Sharing queue interrupts\n");
		else if (ch->combined_count == lif->nxqs)
			return 0;

		if (lif->nxqs != ch->combined_count)
			netdev_info(netdev, "Changing queue count from %d to %d\n",
				    lif->nxqs, ch->combined_count);

		qparam.nxqs = ch->combined_count;
		qparam.intr_split = false;
	} else {
		max_cnt /= 2;
		if (ch->rx_count > max_cnt)
			return -EINVAL;

		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			netdev_info(lif->netdev, "Splitting queue interrupts\n");
		else if (ch->rx_count == lif->nxqs)
			return 0;

		if (lif->nxqs != ch->rx_count)
			netdev_info(netdev, "Changing queue count from %d to %d\n",
				    lif->nxqs, ch->rx_count);

		qparam.nxqs = ch->rx_count;
		qparam.intr_split = true;
	}

	err = ionic_validate_cmb_config(lif, &qparam);
	if (err < 0)
		return err;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->nxqs = qparam.nxqs;

		if (qparam.intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	err = ionic_reconfigure_queues(lif, &qparam);
	mutex_unlock(&lif->queue_lock);
	if (err)
		netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);

	return err;
}

static int ionic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *info, u32 *rules)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = lif->nxqs;
		break;
	default:
		netdev_dbg(netdev, "Command parameter %d is not supported\n",
			   info->cmd);
		err = -EOPNOTSUPP;
	}

	return err;
}

static u32 ionic_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	return le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
}

static u32 ionic_get_rxfh_key_size(struct net_device *netdev)
{
	return IONIC_RSS_HASH_KEY_SIZE;
}

static int ionic_get_rxfh(struct net_device *netdev,
			  struct ethtool_rxfh_param *rxfh)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	unsigned int i, tbl_sz;

	if (rxfh->indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			rxfh->indir[i] = lif->rss_ind_tbl[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int ionic_set_rxfh(struct net_device *netdev,
			  struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	return ionic_lif_rss_config(lif, lif->rss_types,
				    rxfh->key, rxfh->indir);
}

static int ionic_set_tunable(struct net_device *dev,
			     const struct ethtool_tunable *tuna,
			     const void *data)
{
	struct ionic_lif *lif = netdev_priv(dev);
	u32 rx_copybreak;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		rx_copybreak = *(u32 *)data;
		if (rx_copybreak > IONIC_MAX_RX_COPYBREAK) {
			netdev_err(dev, "Max supported rx_copybreak size: %u\n",
				   IONIC_MAX_RX_COPYBREAK);
			return -EINVAL;
		}
		lif->rx_copybreak = (u16)rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int ionic_get_tunable(struct net_device *netdev,
			     const struct ethtool_tunable *tuna, void *data)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = lif->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

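/* The module prom is kept in DMA memory that the firmware can update
 * at any time, so read it twice and compare: only a matching pair of
 * copies is treated as a consistent snapshot, and we give up after a
 * bounded number of attempts rather than spinning forever.
 */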
static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len)
{
	char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)];
	int count = 10;

	/* The NIC keeps the module prom up-to-date in the DMA space
	 * so we can simply copy the module bytes into the data buffer.
	 */
	do {
		memcpy(dst, src, len);
		memcpy(tbuf, src, len);

		/* Let's make sure we got a consistent copy */
		if (!memcmp(dst, tbuf, len))
			break;

	} while (--count);

	if (!count)
		return -ETIMEDOUT;

	return 0;
}

static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
					   const struct ethtool_module_eeprom *page_data,
					   struct netlink_ext_ack *extack)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_dev *idev = &lif->ionic->idev;
	int err;
	u8 *src;

	if (!page_data->length)
		return -EINVAL;

	if (page_data->bank != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
		return -EINVAL;
	}

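	/* Page 0 is indexed from the start of the cached sprom; for the
	 * upper pages the requested offset is in the 128-255 range (per
	 * the SFF-8636/CMIS paging model), so rebase it to the start of
	 * each cached upper-page buffer.
	 */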
	switch (page_data->page) {
	case 0:
		src = &idev->port_info->status.xcvr.sprom[page_data->offset];
		break;
	case 1:
		src = &idev->port_info->sprom_page1[page_data->offset - 128];
		break;
	case 2:
		src = &idev->port_info->sprom_page2[page_data->offset - 128];
		break;
	case 17:
		src = &idev->port_info->sprom_page17[page_data->offset - 128];
		break;
	default:
		return -EOPNOTSUPP;
	}

	memset(page_data->data, 0, page_data->length);
	err = ionic_do_module_copy(page_data->data, src, page_data->length);
	if (err)
		return err;

	return page_data->length;
}

static int ionic_get_ts_info(struct net_device *netdev,
			     struct kernel_ethtool_ts_info *info)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	__le64 mask;

	if (!lif->phc || !lif->phc->ptp)
		return ethtool_op_get_ts_info(netdev, info);

	info->phc_index = ptp_clock_index(lif->phc->ptp);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	/* tx modes */

	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);

	mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_SYNC));
	if (ionic->ident.lif.eth.hwstamp_tx_modes & mask)
		info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);

	mask = cpu_to_le64(BIT_ULL(IONIC_TXSTAMP_ONESTEP_P2P));
	if (ionic->ident.lif.eth.hwstamp_tx_modes & mask)
		info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_P2P);

	/* rx filters */

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);
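
	/* Each IONIC_PKT_CLS_* value may cover more than one classifier
	 * bit, so a filter is advertised only when every bit in its mask
	 * is set in the device's hwstamp_rx_filters word.
	 */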

	mask = cpu_to_le64(IONIC_PKT_CLS_NTP_ALL);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_NTP_ALL);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_SYNC);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_DREQ);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_ALL);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_SYNC);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_DREQ);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_ALL);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_SYNC);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_DREQ);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_ALL);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_SYNC);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_SYNC);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_DREQ);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

	mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_ALL);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask)
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}

static int ionic_nway_reset(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int err = 0;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	/* flap the link to force auto-negotiation */

	mutex_lock(&ionic->dev_cmd_lock);

	ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_DOWN);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);

	if (!err) {
		ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	}

	mutex_unlock(&ionic->dev_cmd_lock);

	return err;
}

static const struct ethtool_ops ionic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
	.supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
				 ETHTOOL_RING_USE_RX_PUSH,
	.get_drvinfo = ionic_get_drvinfo,
	.get_regs_len = ionic_get_regs_len,
	.get_regs = ionic_get_regs,
	.get_link = ethtool_op_get_link,
	.get_link_ext_stats = ionic_get_link_ext_stats,
	.get_link_ksettings = ionic_get_link_ksettings,
	.set_link_ksettings = ionic_set_link_ksettings,
	.get_coalesce = ionic_get_coalesce,
	.set_coalesce = ionic_set_coalesce,
	.get_ringparam = ionic_get_ringparam,
	.set_ringparam = ionic_set_ringparam,
	.get_channels = ionic_get_channels,
	.set_channels = ionic_set_channels,
	.get_strings = ionic_get_strings,
	.get_ethtool_stats = ionic_get_stats,
	.get_sset_count = ionic_get_sset_count,
	.get_rxnfc = ionic_get_rxnfc,
	.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
	.get_rxfh_key_size = ionic_get_rxfh_key_size,
	.get_rxfh = ionic_get_rxfh,
	.set_rxfh = ionic_set_rxfh,
	.get_tunable = ionic_get_tunable,
	.set_tunable = ionic_set_tunable,
	.get_module_eeprom_by_page = ionic_get_module_eeprom_by_page,
	.get_pauseparam = ionic_get_pauseparam,
	.set_pauseparam = ionic_set_pauseparam,
	.get_fecparam = ionic_get_fecparam,
	.set_fecparam = ionic_set_fecparam,
	.get_ts_info = ionic_get_ts_info,
	.nway_reset = ionic_nway_reset,
};

void ionic_ethtool_set_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ionic_ethtool_ops;
}