// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phylink.h>
#include <linux/netdevice.h>

#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"

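/* Report link settings. AML 40G parts do not support this op. Beyond the
 * common libwx handling, SFP-based parts override the port type and link
 * modes with those of the installed module (link_port/sfp_support).
 */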
int txgbe_get_link_ksettings(struct net_device *netdev,
			     struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);
	struct txgbe *txgbe = wx->priv;
	int err;

	if (wx->mac.type == wx_mac_aml40)
		return -EOPNOTSUPP;

	err = wx_get_link_ksettings(netdev, cmd);
	if (err)
		return err;

	if (wx->mac.type == wx_mac_sp)
		return 0;

	cmd->base.port = txgbe->link_port;
	cmd->base.autoneg = AUTONEG_DISABLE;
	linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
	linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);

	return 0;
}

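/* Resize the Tx/Rx descriptor rings. Requested counts are clamped and
 * aligned to hardware requirements; if the interface is running, the rings
 * are rebuilt through a temporary copy while the device is briefly downed.
 */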
static int txgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;
	struct wx_ring *temp_ring;
	int i, err = 0;

	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);

	if (new_tx_count == wx->tx_ring_count &&
	    new_rx_count == wx->rx_ring_count)
		return 0;

	err = wx_set_state_reset(wx);
	if (err)
		return err;

	if (!netif_running(wx->netdev)) {
		for (i = 0; i < wx->num_tx_queues; i++)
			wx->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < wx->num_rx_queues; i++)
			wx->rx_ring[i]->count = new_rx_count;
		wx->tx_ring_count = new_tx_count;
		wx->rx_ring_count = new_rx_count;

		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	txgbe_down(wx);

	wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
	kvfree(temp_ring);

	txgbe_up(wx);

clear_reset:
	clear_bit(WX_STATE_RESETTING, wx->state);
	return err;
}

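/* Change the channel (queue) count, then rerun setup TC so the traffic
 * class to queue mapping stays consistent with the new count.
 */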
static int txgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	int err;

	err = wx_set_channels(dev, ch);
	if (err < 0)
		return err;

	/* use setup TC to update any traffic class queue mapping */
	return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}

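/* Look up one Flow Director rule by its software index (fsp->location)
 * and translate it back into an ethtool flow spec.
 */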
static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	union txgbe_atr_input *mask = &txgbe->fdir_mask;
	struct txgbe_fdir_filter *rule = NULL;
	struct hlist_node *node;

	/* report total rule count */
	cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

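/* Fill rule_locs[] with the software index of every installed Flow
 * Director rule, or -EMSGSIZE if the caller's buffer is too small.
 */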
static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct txgbe_fdir_filter *rule;
	struct hlist_node *node;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

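/* ethtool get_rxnfc entry point: report the Rx ring count or query Flow
 * Director rules (count, single rule, or all rule locations).
 */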
static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = wx->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = txgbe->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

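/* Map an ethtool flow spec onto the ATR flow types the hardware knows.
 * Only IPv4 TCP/UDP/SCTP and raw IPv4 (proto fully masked) are supported.
 */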
static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			fallthrough;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

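/* Return true if a rule with the same bucket hash and action is already
 * on the software filter list.
 */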
static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe,
					   struct txgbe_fdir_filter *input)
{
	struct txgbe_fdir_filter *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (rule->filter.formatted.bkt_hash ==
		    input->filter.formatted.bkt_hash &&
		    rule->action == input->action) {
			wx_dbg(txgbe->wx, "FDIR entry already exists\n");
			return true;
		}
	}
	return false;
}

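/* Insert, replace, or (when input is NULL) delete the rule at sw_idx,
 * keeping the software filter list sorted by sw_idx and the hardware
 * filter in sync while the interface is up. Caller must hold
 * fdir_perfect_lock.
 */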
static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe,
					   struct txgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct hlist_node *node = NULL, *parent = NULL;
	struct txgbe_fdir_filter *rule;
	struct wx *wx = txgbe->wx;
	bool deleted = false;
	int err;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place, remove it */
	if (rule && rule->sw_idx == sw_idx) {
		/* hardware filters are only configured when interface is up,
		 * and we should not issue filter commands while the interface
		 * is down
		 */
		if (netif_running(wx->netdev) &&
		    (!input || rule->filter.formatted.bkt_hash !=
		     input->filter.formatted.bkt_hash)) {
			err = txgbe_fdir_erase_perfect_filter(wx,
							      &rule->filter,
							      sw_idx);
			if (err)
				return -EINVAL;
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		txgbe->fdir_filter_count--;
		deleted = true;
	}

	/* If we weren't given an input, then this was a request to delete a
	 * filter. We should return -EINVAL if the filter wasn't found, but
	 * return 0 if the rule was successfully deleted.
	 */
	if (!input)
		return deleted ? 0 : -EINVAL;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, parent);
	else
		hlist_add_head(&input->fdir_node,
			       &txgbe->fdir_filter_list);

	/* update counts */
	txgbe->fdir_filter_count++;

	return 0;
}

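/* Translate an ethtool flow spec into a Flow Director perfect filter and
 * program it: validate the target queue, build the filter and its mask,
 * install the (single, per-port) input mask in hardware if this is the
 * first rule, then write the filter and track it in the software list.
 */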
static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct txgbe_fdir_filter *input;
	union txgbe_atr_input mask;
	struct wx *wx = txgbe->wx;
	int err = -EINVAL;
	u16 ptype = 0;
	u8 queue;

	if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
		return -EOPNOTSUPP;

	/* ring_cookie is masked into a set of queues and txgbe pools,
	 * or it holds the drop index
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = TXGBE_RDB_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && ring >= wx->num_rx_queues)
			return -EINVAL;
		else if (vf && (vf > wx->num_vfs ||
				ring >= wx->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = wx->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
		wx_err(wx, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union txgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (txgbe_flowspec_to_flow_type(fsp,
					&input->filter.formatted.flow_type)) {
		wx_err(wx, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
				   TXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.flex_bytes =
				fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	switch (input->filter.formatted.flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
		ptype = WX_PTYPE_L2_IPV4_TCP;
		break;
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
		ptype = WX_PTYPE_L2_IPV4_UDP;
		break;
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
		ptype = WX_PTYPE_L2_IPV4_SCTP;
		break;
	case TXGBE_ATR_FLOW_TYPE_IPV4:
		ptype = WX_PTYPE_L2_IPV4;
		break;
	default:
		break;
	}

	input->filter.formatted.vlan_id = htons(ptype);
	if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
		mask.formatted.vlan_id = htons(0xFFFF);
	else
		mask.formatted.vlan_id = htons(0xFFF8);

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&txgbe->fdir_perfect_lock);

	if (hlist_empty(&txgbe->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
		err = txgbe_fdir_set_input_mask(wx, &mask);
		if (err)
			goto err_unlock;
	} else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
		wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
		goto err_unlock;
	}

	/* apply mask and compute/store hash */
	txgbe_atr_compute_perfect_hash(&input->filter, &mask);

	/* make sure an identical entry isn't already on the filter list */
	if (txgbe_match_ethtool_fdir_entry(txgbe, input))
		goto err_unlock;

	/* only program filters to hardware if the net device is running, as
	 * we store the filters in the Rx buffer which is not allocated when
	 * the device is down
	 */
	if (netif_running(wx->netdev)) {
		err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
						      input->sw_idx, queue);
		if (err)
			goto err_unlock;
	}

	txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx);

	spin_unlock(&txgbe->fdir_perfect_lock);

	return 0;
err_unlock:
	spin_unlock(&txgbe->fdir_perfect_lock);
err_out:
	kfree(input);
	return err;
}

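/* Delete the Flow Director rule at fsp->location, if one exists. */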
static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err = 0;

	spin_lock(&txgbe->fdir_perfect_lock);
	err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location);
	spin_unlock(&txgbe->fdir_perfect_lock);

	return err;
}

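/* ethtool set_rxnfc entry point: insert or delete Flow Director rules. */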
static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops txgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
	.get_drvinfo		= wx_get_drvinfo,
	.nway_reset		= wx_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= txgbe_get_link_ksettings,
	.set_link_ksettings	= wx_set_link_ksettings,
	.get_sset_count		= wx_get_sset_count,
	.get_strings		= wx_get_strings,
	.get_ethtool_stats	= wx_get_ethtool_stats,
	.get_eth_mac_stats	= wx_get_mac_stats,
	.get_pause_stats	= wx_get_pause_stats,
	.get_pauseparam		= wx_get_pauseparam,
	.set_pauseparam		= wx_set_pauseparam,
	.get_ringparam		= wx_get_ringparam,
	.set_ringparam		= txgbe_set_ringparam,
	.get_coalesce		= wx_get_coalesce,
	.set_coalesce		= wx_set_coalesce,
	.get_channels		= wx_get_channels,
	.set_channels		= txgbe_set_channels,
	.get_rxnfc		= txgbe_get_rxnfc,
	.set_rxnfc		= txgbe_set_rxnfc,
	.get_msglevel		= wx_get_msglevel,
	.set_msglevel		= wx_set_msglevel,
	.get_ts_info		= wx_get_ts_info,
	.get_ts_stats		= wx_get_ptp_stats,
};

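/* Attach the txgbe ethtool_ops to the net device. */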
void txgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &txgbe_ethtool_ops;
}